// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
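/*
 * For illustration (not in the original source): a chain of three pages
 * looks like
 *
 *	head -> page A -> page B -> page C -> NULL
 *
 * where each "->" is the page's ->private field, read via
 * page_chain_next() and terminated by set_page_private(page, 0).
 * page_chain_del(&head, 2) above would return A (with B as its tail,
 * B->private == 0) and leave head pointing at C.
 */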
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page))) {
		++i;
		page = tmp;
	}
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}
static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that is not finished,
	   we can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD device.
 * @number:		number of pages requested
 * @retry:		whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}
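/*
 * For illustration: the typical caller is drbd_alloc_peer_req() below,
 * which requests enough pages for a peer request's payload and allows
 * blocking retries only for blocking allocations:
 *
 *	page = drbd_alloc_pages(peer_device, PFN_UP(payload_size),
 *				gfpflags_allow_blocking(gfp_mask));
 *
 * Once a full 100ms (HZ/10) throttle period expires without a wake-up,
 * mxb is raised to UINT_MAX: max-buffers degrades from a throttle into
 * no limit at all, precisely because it must not act as a hard limit
 * (see the comment above).
 */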
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim:   payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned int nr_pages = PFN_UP(payload_size);

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, &drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
			  int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, &drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
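/*
 * For illustration: prepare_listen_socket() and drbd_wait_for_connect()
 * cooperate through the accept_wait_data above.  The listening socket's
 * sk_state_change callback is replaced with drbd_incoming_connection(),
 * which rings ad->door_bell (a completion) once an incoming connection
 * reaches TCP_ESTABLISHED; drbd_wait_for_connect() sleeps on that
 * completion and only then calls kernel_accept(), so the receiver never
 * blocks inside accept() itself.
 */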
static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += get_random_u32_below(2) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}
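/*
 * For illustration: conn_connect() below establishes *two* TCP
 * connections per peer, a "data" socket and a "meta" (msock) socket.
 * Each side both connects out (drbd_try_connect()) and accepts
 * (drbd_wait_for_connect()); the first established socket announces
 * itself with P_INITIAL_DATA, the second with P_INITIAL_META.  If both
 * nodes happen to open the same socket concurrently ("initial packet
 * crossed"), the newer socket wins and a coin flip
 * (get_random_u32_below(2)) decides whether to retry, so the tie
 * eventually breaks.
 */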
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h;
	bool discard_my_data, ok;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better. */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (connection_established(connection, &sock.socket, &msock.socket))
			break;

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (get_random_u32_below(2))
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}
		ok = connection_established(connection, &sock.socket, &msock.socket);
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	tcp_sock_set_nodelay(sock.socket->sk);
	tcp_sock_set_nodelay(msock.socket->sk);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	/* Prevent a race between resync-handshake and
	 * being promoted to Primary.
	 *
	 * Grab and release the state mutex, so we know that any current
	 * drbd_set_role() is finished, and any incoming drbd_set_role
	 * will see the STATE_SENT flag, and wait for it to be cleared.
	 */
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_lock(peer_device->device->state_mutex);

	/* avoid a race with conn_request_state( C_DISCONNECTING ) */
	spin_lock_irq(&connection->resource->req_lock);
	set_bit(STATE_SENT, &connection->flags);
	spin_unlock_irq(&connection->resource->req_lock);

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_unlock(peer_device->device->state_mutex);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		kref_get(&device->kref);
		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->ack_receiver);
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use format string arguments */
	connection->ack_sender =
		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
	if (!connection->ack_sender) {
		drbd_err(connection, "Failed to create workqueue ack_sender\n");
		return 0;
	}

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
	if (current->plug == &connection->receiver_plug) {
		blk_finish_plug(&connection->receiver_plug);
		blk_start_plug(&connection->receiver_plug);
	} /* else: maybe just schedule() ?? */
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	unsigned int size = drbd_header_size(connection);
	int err;

	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
	if (err != size) {
		/* If we have nothing in the receive buffer now, to reduce
		 * application latency, try to drain the backend queues as
		 * quickly as possible, and let remote TCP know what we have
		 * received so far. */
		if (err == -EAGAIN) {
			tcp_sock_set_quickack(connection->data.socket->sk, 2);
			drbd_unplug_all_devices(connection);
		}
		if (err > 0) {
			buffer += err;
			size -= err;
		}
		err = drbd_recv_all_warn(connection, buffer, size);
		if (err)
			return err;
	}

	err = decode_header(connection, connection->data.rbuf, pi);
	connection->last_received = jiffies;

	return err;
}
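/*
 * For reference, the three on-the-wire header layouts distinguished by
 * decode_header() above (see drbd_protocol.h) are, in big-endian byte
 * order:
 *
 *	p_header80:  u32 magic,  u16 command, u16 length
 *	p_header95:  u16 magic,  u16 command, u32 length
 *	p_header100: u32 magic,  u16 volume,  u16 command, u32 length, u32 pad
 *
 * Only protocol 100 headers carry a volume number; for the older
 * formats pi->vnr is forced to 0.
 */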
/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

	if (!octx) {
		drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

/*
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo)
{
	struct drbd_device *device;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_NONE] = "none",
		[WO_DRAIN_IO] = "drain",
		[WO_BDEV_FLUSH] = "flush",
	};

	pwo = resource->write_ordering;
	if (wo != WO_BDEV_FLUSH)
		wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, vnr) {
		if (get_ldev(device)) {
			wo = max_allowed_wo(device->ldev, wo);
			if (device->ldev == bdev)
				bdev = NULL;
			put_ldev(device);
		}
	}

	if (bdev)
		wo = max_allowed_wo(bdev, wo);

	rcu_read_unlock();

	resource->write_ordering = wo;
	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}

/*
 * Mapping "discard" to ZEROOUT with UNMAP does not work for us:
 * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
 * will directly go to fallback mode, submitting normal writes, and
 * never even try to UNMAP.
 *
 * And dm-thin does not do this (yet), mostly because in general it has
 * to assume that "skip_block_zeroing" is set.  See also:
 * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
 * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
 *
 * We *may* ignore the discard-zeroes-data setting, if so configured.
 *
 * Assumption is that this "discard_zeroes_data=0" is only because the backend
 * may ignore partial unaligned discards.
 *
 * LVM/DM thin as of at least
 *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
 *   Library version: 1.02.93-RHEL7 (2015-01-28)
 *   Driver version:  4.29.0
 * still behaves this way.
 *
 * For unaligned (wrt. alignment and granularity) or too small discards,
 * we zero-out the initial (and/or) trailing unaligned partial chunks,
 * but discard all the aligned full chunks.
 *
 * At least for LVM/DM thin, with skip_block_zeroing=false,
 * the result is effectively "discard_zeroes_data=1".
 */
/* flags: EE_TRIM|EE_ZEROOUT */
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
{
	struct block_device *bdev = device->ldev->backing_bdev;
	sector_t tmp, nr;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	int err = 0;

	if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
		goto zero_out;
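	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with a discard granularity of 4 KiB (granularity = 8
	 * sectors), alignment 0, start = 13 and nr_sectors = 30, the
	 * head 13..15 (3 sectors) is zeroed out, 16..39 (24 sectors,
	 * three full 8-sector chunks) is discarded, and the tail 40..42
	 * (3 sectors) is zeroed out by the final blkdev_issue_zeroout()
	 * below.
	 */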
	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		goto zero_out;

	if (nr_sectors < granularity)
		goto zero_out;

	tmp = start;
	if (sector_div(tmp, granularity) != alignment) {
		if (nr_sectors < 2*granularity)
			goto zero_out;
		/* start + gran - (start + gran - align) % gran */
		tmp = start + granularity - alignment;
		tmp = start + granularity - sector_div(tmp, granularity);

		nr = tmp - start;
		/* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many
		 * layers are below us, some may have smaller granularity */
		err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
		nr_sectors -= nr;
		start = tmp;
	}
	while (nr_sectors >= max_discard_sectors) {
		err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
					    GFP_NOIO);
		nr_sectors -= max_discard_sectors;
		start += max_discard_sectors;
	}
	if (nr_sectors) {
		/* max_discard_sectors is unsigned int (and a multiple of
		 * granularity, we made sure of that above already);
		 * nr is < max_discard_sectors;
		 * I don't need sector_div here, even though nr is sector_t */
		nr = nr_sectors;
		nr -= (unsigned int)nr % granularity;
		if (nr) {
			err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
			nr_sectors -= nr;
			start += nr;
		}
	}
 zero_out:
	if (nr_sectors) {
		err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
				(flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP);
	}
	return err != 0;
}

static bool can_do_reliable_discards(struct drbd_device *device)
{
	struct disk_conf *dc;
	bool can_do;

	if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
		return false;

	rcu_read_lock();
	dc = rcu_dereference(device->ldev->disk_conf);
	can_do = dc->discard_zeroes_if_aligned;
	rcu_read_unlock();
	return can_do;
}

static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
	/* If the backend cannot discard, or does not guarantee
	 * read-back zeroes in discarded ranges, we fall back to
	 * zero-out.  Unless configuration specifically requested
	 * otherwise. */
	if (!can_do_reliable_discards(device))
		peer_req->flags |= EE_ZEROOUT;

	if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
	    peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
		peer_req->flags |= EE_WAS_ERROR;
	drbd_endio_write_sec_final(peer_req);
}

static int peer_request_fault_type(struct drbd_peer_request *peer_req)
{
	if (peer_req_op(peer_req) == REQ_OP_READ) {
		return peer_req->flags & EE_APPLICATION ?
			DRBD_FAULT_DT_RD : DRBD_FAULT_RS_RD;
	} else {
		return peer_req->flags & EE_APPLICATION ?
			DRBD_FAULT_DT_WR : DRBD_FAULT_RS_WR;
	}
}

/**
 * drbd_submit_peer_request()
 * @peer_req:	peer request
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 * single page to an empty bio (which should never happen and likely indicates
 * that the lower level IO stack is in some way broken).  This has been observed
 * on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_req->peer_device->device;
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned int data_size = peer_req->i.size;
	unsigned int n_bios = 0;
	unsigned int nr_pages = PFN_UP(data_size);

	/* TRIM/DISCARD: for now, always use the helper function
	 * blkdev_issue_zeroout(..., discard=true).
	 * It's synchronous, but it does the right thing wrt. bio splitting.
	 * Correctness first, performance later.  Next step is to code an
	 * asynchronous variant of the same.
	 */
	if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
		/* wait for all pending IO completions, before we start
		 * zeroing things out. */
		conn_wait_active_ee_empty(peer_req->peer_device->connection);
		/* add it to the active list now,
		 * so we can find it to present it in debugfs */
		peer_req->submit_jif = jiffies;
		peer_req->flags |= EE_SUBMITTED;

		/* If this was a resync request from receive_rs_deallocated(),
		 * it is already on the sync_ee list */
		if (list_empty(&peer_req->w.list)) {
			spin_lock_irq(&device->resource->req_lock);
			list_add_tail(&peer_req->w.list, &device->active_ee);
			spin_unlock_irq(&device->resource->req_lock);
		}

		drbd_issue_peer_discard_or_zero_out(device, peer_req);
		return 0;
	}

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	/* _DISCARD, _WRITE_ZEROES handled above.
	 * REQ_OP_FLUSH (empty flush) not expected,
	 * should have been mapped to a "drbd protocol barrier".
	 * REQ_OP_SECURE_ERASE: I don't see how we could ever support that.
	 */
	if (!(peer_req_op(peer_req) == REQ_OP_WRITE ||
	      peer_req_op(peer_req) == REQ_OP_READ)) {
		drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf);
		return -EINVAL;
	}

	bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO);
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0))
			goto next_bio;
		data_size -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(device, data_size == 0);
	D_ASSERT(device, page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
	/* for debugfs: update timestamp, mark as submitted */
	peer_req->submit_jif = jiffies;
	peer_req->flags |= EE_SUBMITTED;
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio);
	} while (bios);
	return 0;
}

static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&device->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_wait_ee_list_empty(device, &device->active_ee);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{
	int rv;
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	connection->current_epoch->barrier_nr = p->barrier;
	connection->current_epoch->connection = connection;
	rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (connection->resource->write_ordering) {
	case WO_NONE:
		if (rv == FE_RECYCLED)
			return 0;

		/* receiver context, in the writeout path of the other node.
1780 * avoid potential distributed deadlock */ 1781 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1782 if (epoch) 1783 break; 1784 else 1785 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n"); 1786 fallthrough; 1787 1788 case WO_BDEV_FLUSH: 1789 case WO_DRAIN_IO: 1790 conn_wait_active_ee_empty(connection); 1791 drbd_flush(connection); 1792 1793 if (atomic_read(&connection->current_epoch->epoch_size)) { 1794 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1795 if (epoch) 1796 break; 1797 } 1798 1799 return 0; 1800 default: 1801 drbd_err(connection, "Strangeness in connection->write_ordering %d\n", 1802 connection->resource->write_ordering); 1803 return -EIO; 1804 } 1805 1806 epoch->flags = 0; 1807 atomic_set(&epoch->epoch_size, 0); 1808 atomic_set(&epoch->active, 0); 1809 1810 spin_lock(&connection->epoch_lock); 1811 if (atomic_read(&connection->current_epoch->epoch_size)) { 1812 list_add(&epoch->list, &connection->current_epoch->list); 1813 connection->current_epoch = epoch; 1814 connection->epochs++; 1815 } else { 1816 /* The current_epoch got recycled while we allocated this one... */ 1817 kfree(epoch); 1818 } 1819 spin_unlock(&connection->epoch_lock); 1820 1821 return 0; 1822 } 1823 1824 /* quick wrapper in case payload size != request_size (write same) */ 1825 static void drbd_csum_ee_size(struct crypto_shash *h, 1826 struct drbd_peer_request *r, void *d, 1827 unsigned int payload_size) 1828 { 1829 unsigned int tmp = r->i.size; 1830 r->i.size = payload_size; 1831 drbd_csum_ee(h, r, d); 1832 r->i.size = tmp; 1833 } 1834 1835 /* used from receive_RSDataReply (recv_resync_read) 1836 * and from receive_Data. 1837 * data_size: actual payload ("data in") 1838 * for normal writes that is bi_size. 1839 * for discards, that is zero. 1840 * for write same, it is logical_block_size. 1841 * both trim and write same have the bi_size ("data len to be affected") 1842 * as extra argument in the packet header. 1843 */ 1844 static struct drbd_peer_request * 1845 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, 1846 struct packet_info *pi) __must_hold(local) 1847 { 1848 struct drbd_device *device = peer_device->device; 1849 const sector_t capacity = get_capacity(device->vdisk); 1850 struct drbd_peer_request *peer_req; 1851 struct page *page; 1852 int digest_size, err; 1853 unsigned int data_size = pi->size, ds; 1854 void *dig_in = peer_device->connection->int_dig_in; 1855 void *dig_vv = peer_device->connection->int_dig_vv; 1856 unsigned long *data; 1857 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; 1858 struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL; 1859 1860 digest_size = 0; 1861 if (!trim && peer_device->connection->peer_integrity_tfm) { 1862 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm); 1863 /* 1864 * FIXME: Receive the incoming digest into the receive buffer 1865 * here, together with its struct p_data? 1866 */ 1867 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); 1868 if (err) 1869 return NULL; 1870 data_size -= digest_size; 1871 } 1872 1873 /* assume request_size == data_size, but special case trim. 
*/
1874 ds = data_size;
1875 if (trim) {
1876 if (!expect(peer_device, data_size == 0))
1877 return NULL;
1878 ds = be32_to_cpu(trim->size);
1879 } else if (zeroes) {
1880 if (!expect(peer_device, data_size == 0))
1881 return NULL;
1882 ds = be32_to_cpu(zeroes->size);
1883 }
1884
1885 if (!expect(peer_device, IS_ALIGNED(ds, 512)))
1886 return NULL;
1887 if (trim || zeroes) {
1888 if (!expect(peer_device, ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1889 return NULL;
1890 } else if (!expect(peer_device, ds <= DRBD_MAX_BIO_SIZE))
1891 return NULL;
1892
1893 /* even though we trust our peer,
1894 * we sometimes have to double check. */
1895 if (sector + (ds>>9) > capacity) {
1896 drbd_err(device, "request from peer beyond end of local disk: "
1897 "capacity: %llus < sector: %llus + size: %u\n",
1898 (unsigned long long)capacity,
1899 (unsigned long long)sector, ds);
1900 return NULL;
1901 }
1902
1903 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1904 * "criss-cross" setup, that might cause write-out on some other DRBD,
1905 * which in turn might block on the other node at this very place. */
1906 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1907 if (!peer_req)
1908 return NULL;
1909
1910 peer_req->flags |= EE_WRITE;
1911 if (trim) {
1912 peer_req->flags |= EE_TRIM;
1913 return peer_req;
1914 }
1915 if (zeroes) {
1916 peer_req->flags |= EE_ZEROOUT;
1917 return peer_req;
1918 }
1919
1920 /* receive payload size bytes into page chain */
1921 ds = data_size;
1922 page = peer_req->pages;
1923 page_chain_for_each(page) {
1924 unsigned len = min_t(int, ds, PAGE_SIZE);
1925 data = kmap(page);
1926 err = drbd_recv_all_warn(peer_device->connection, data, len);
1927 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1928 drbd_err(device, "Fault injection: Corrupting data on receive\n");
1929 data[0] = data[0] ^ (unsigned long)-1;
1930 }
1931 kunmap(page);
1932 if (err) {
1933 drbd_free_peer_req(device, peer_req);
1934 return NULL;
1935 }
1936 ds -= len;
1937 }
1938
1939 if (digest_size) {
1940 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1941 if (memcmp(dig_in, dig_vv, digest_size)) {
1942 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1943 (unsigned long long)sector, data_size);
1944 drbd_free_peer_req(device, peer_req);
1945 return NULL;
1946 }
1947 }
1948 device->recv_cnt += data_size >> 9;
1949 return peer_req;
1950 }
1951
1952 /* drbd_drain_block() just takes a data block
1953 * out of the socket input buffer, and discards it.
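 * Used to keep the data stream in sync when we have to consume a
 * payload we cannot use, e.g. because we have no local disk to
 * write it to.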
1954 */ 1955 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size) 1956 { 1957 struct page *page; 1958 int err = 0; 1959 void *data; 1960 1961 if (!data_size) 1962 return 0; 1963 1964 page = drbd_alloc_pages(peer_device, 1, 1); 1965 1966 data = kmap(page); 1967 while (data_size) { 1968 unsigned int len = min_t(int, data_size, PAGE_SIZE); 1969 1970 err = drbd_recv_all_warn(peer_device->connection, data, len); 1971 if (err) 1972 break; 1973 data_size -= len; 1974 } 1975 kunmap(page); 1976 drbd_free_pages(peer_device->device, page, 0); 1977 return err; 1978 } 1979 1980 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req, 1981 sector_t sector, int data_size) 1982 { 1983 struct bio_vec bvec; 1984 struct bvec_iter iter; 1985 struct bio *bio; 1986 int digest_size, err, expect; 1987 void *dig_in = peer_device->connection->int_dig_in; 1988 void *dig_vv = peer_device->connection->int_dig_vv; 1989 1990 digest_size = 0; 1991 if (peer_device->connection->peer_integrity_tfm) { 1992 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm); 1993 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); 1994 if (err) 1995 return err; 1996 data_size -= digest_size; 1997 } 1998 1999 /* optimistically update recv_cnt. if receiving fails below, 2000 * we disconnect anyways, and counters will be reset. */ 2001 peer_device->device->recv_cnt += data_size>>9; 2002 2003 bio = req->master_bio; 2004 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); 2005 2006 bio_for_each_segment(bvec, bio, iter) { 2007 void *mapped = bvec_kmap_local(&bvec); 2008 expect = min_t(int, data_size, bvec.bv_len); 2009 err = drbd_recv_all_warn(peer_device->connection, mapped, expect); 2010 kunmap_local(mapped); 2011 if (err) 2012 return err; 2013 data_size -= expect; 2014 } 2015 2016 if (digest_size) { 2017 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv); 2018 if (memcmp(dig_in, dig_vv, digest_size)) { 2019 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n"); 2020 return -EINVAL; 2021 } 2022 } 2023 2024 D_ASSERT(peer_device->device, data_size == 0); 2025 return 0; 2026 } 2027 2028 /* 2029 * e_end_resync_block() is called in ack_sender context via 2030 * drbd_finish_peer_reqs(). 
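 * On success it marks the range in sync and sends P_RS_WRITE_ACK;
 * after an EE_WAS_ERROR it records the failed range and sends
 * P_NEG_ACK instead.  Either way it drops the unacked count taken
 * in recv_resync_read().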
2031 */
2032 static int e_end_resync_block(struct drbd_work *w, int unused)
2033 {
2034 struct drbd_peer_request *peer_req =
2035 container_of(w, struct drbd_peer_request, w);
2036 struct drbd_peer_device *peer_device = peer_req->peer_device;
2037 struct drbd_device *device = peer_device->device;
2038 sector_t sector = peer_req->i.sector;
2039 int err;
2040
2041 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2042
2043 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2044 drbd_set_in_sync(device, sector, peer_req->i.size);
2045 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
2046 } else {
2047 /* Record failure to sync */
2048 drbd_rs_failed_io(device, sector, peer_req->i.size);
2049
2050 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2051 }
2052 dec_unacked(device);
2053
2054 return err;
2055 }
2056
2057 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
2058 struct packet_info *pi) __releases(local)
2059 {
2060 struct drbd_device *device = peer_device->device;
2061 struct drbd_peer_request *peer_req;
2062
2063 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
2064 if (!peer_req)
2065 goto fail;
2066
2067 dec_rs_pending(device);
2068
2069 inc_unacked(device);
2070 /* corresponding dec_unacked() in e_end_resync_block(),
2071 * respectively in _drbd_clear_done_ee */
2072
2073 peer_req->w.cb = e_end_resync_block;
2074 peer_req->opf = REQ_OP_WRITE;
2075 peer_req->submit_jif = jiffies;
2076
2077 spin_lock_irq(&device->resource->req_lock);
2078 list_add_tail(&peer_req->w.list, &device->sync_ee);
2079 spin_unlock_irq(&device->resource->req_lock);
2080
2081 atomic_add(pi->size >> 9, &device->rs_sect_ev);
2082 if (drbd_submit_peer_request(peer_req) == 0)
2083 return 0;
2084
2085 /* don't care for the reason here */
2086 drbd_err(device, "submit failed, triggering re-connect\n");
2087 spin_lock_irq(&device->resource->req_lock);
2088 list_del(&peer_req->w.list);
2089 spin_unlock_irq(&device->resource->req_lock);
2090
2091 drbd_free_peer_req(device, peer_req);
2092 fail:
2093 put_ldev(device);
2094 return -EIO;
2095 }
2096
2097 static struct drbd_request *
2098 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2099 sector_t sector, bool missing_ok, const char *func)
2100 {
2101 struct drbd_request *req;
2102
2103 /* Request object according to our peer */
2104 req = (struct drbd_request *)(unsigned long)id;
2105 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
2106 return req;
2107 if (!missing_ok) {
2108 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
2109 (unsigned long)id, (unsigned long long)sector);
2110 }
2111 return NULL;
2112 }
2113
2114 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
2115 {
2116 struct drbd_peer_device *peer_device;
2117 struct drbd_device *device;
2118 struct drbd_request *req;
2119 sector_t sector;
2120 int err;
2121 struct p_data *p = pi->data;
2122
2123 peer_device = conn_peer_device(connection, pi->vnr);
2124 if (!peer_device)
2125 return -EIO;
2126 device = peer_device->device;
2127
2128 sector = be64_to_cpu(p->sector);
2129
2130 spin_lock_irq(&device->resource->req_lock);
2131 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2132 spin_unlock_irq(&device->resource->req_lock);
2133 if (unlikely(!req))
2134 return -EIO;
2135
2136 err = recv_dless_read(peer_device, req, sector, pi->size);
2137 if (!err)
2138 req_mod(req, DATA_RECEIVED);
2139 /* else: nothing.
handled from drbd_disconnect... 2140 * I don't think we may complete this just yet 2141 * in case we are "on-disconnect: freeze" */ 2142 2143 return err; 2144 } 2145 2146 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi) 2147 { 2148 struct drbd_peer_device *peer_device; 2149 struct drbd_device *device; 2150 sector_t sector; 2151 int err; 2152 struct p_data *p = pi->data; 2153 2154 peer_device = conn_peer_device(connection, pi->vnr); 2155 if (!peer_device) 2156 return -EIO; 2157 device = peer_device->device; 2158 2159 sector = be64_to_cpu(p->sector); 2160 D_ASSERT(device, p->block_id == ID_SYNCER); 2161 2162 if (get_ldev(device)) { 2163 /* data is submitted to disk within recv_resync_read. 2164 * corresponding put_ldev done below on error, 2165 * or in drbd_peer_request_endio. */ 2166 err = recv_resync_read(peer_device, sector, pi); 2167 } else { 2168 if (drbd_ratelimit()) 2169 drbd_err(device, "Can not write resync data to local disk.\n"); 2170 2171 err = drbd_drain_block(peer_device, pi->size); 2172 2173 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); 2174 } 2175 2176 atomic_add(pi->size >> 9, &device->rs_sect_in); 2177 2178 return err; 2179 } 2180 2181 static void restart_conflicting_writes(struct drbd_device *device, 2182 sector_t sector, int size) 2183 { 2184 struct drbd_interval *i; 2185 struct drbd_request *req; 2186 2187 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2188 if (!i->local) 2189 continue; 2190 req = container_of(i, struct drbd_request, i); 2191 if (req->rq_state & RQ_LOCAL_PENDING || 2192 !(req->rq_state & RQ_POSTPONED)) 2193 continue; 2194 /* as it is RQ_POSTPONED, this will cause it to 2195 * be queued on the retry workqueue. */ 2196 __req_mod(req, CONFLICT_RESOLVED, NULL); 2197 } 2198 } 2199 2200 /* 2201 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs(). 2202 */ 2203 static int e_end_block(struct drbd_work *w, int cancel) 2204 { 2205 struct drbd_peer_request *peer_req = 2206 container_of(w, struct drbd_peer_request, w); 2207 struct drbd_peer_device *peer_device = peer_req->peer_device; 2208 struct drbd_device *device = peer_device->device; 2209 sector_t sector = peer_req->i.sector; 2210 int err = 0, pcmd; 2211 2212 if (peer_req->flags & EE_SEND_WRITE_ACK) { 2213 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { 2214 pcmd = (device->state.conn >= C_SYNC_SOURCE && 2215 device->state.conn <= C_PAUSED_SYNC_T && 2216 peer_req->flags & EE_MAY_SET_IN_SYNC) ? 2217 P_RS_WRITE_ACK : P_WRITE_ACK; 2218 err = drbd_send_ack(peer_device, pcmd, peer_req); 2219 if (pcmd == P_RS_WRITE_ACK) 2220 drbd_set_in_sync(device, sector, peer_req->i.size); 2221 } else { 2222 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); 2223 /* we expect it to be marked out of sync anyways... 2224 * maybe assert this? */ 2225 } 2226 dec_unacked(device); 2227 } 2228 2229 /* we delete from the conflict detection hash _after_ we sent out the 2230 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. 
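 * Removing the interval below also wakes anyone sleeping in
 * drbd_wait_misc() on it, and with EE_RESTART_REQUESTS set re-queues
 * postponed conflicting local writes via restart_conflicting_writes().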
*/ 2231 if (peer_req->flags & EE_IN_INTERVAL_TREE) { 2232 spin_lock_irq(&device->resource->req_lock); 2233 D_ASSERT(device, !drbd_interval_empty(&peer_req->i)); 2234 drbd_remove_epoch_entry_interval(device, peer_req); 2235 if (peer_req->flags & EE_RESTART_REQUESTS) 2236 restart_conflicting_writes(device, sector, peer_req->i.size); 2237 spin_unlock_irq(&device->resource->req_lock); 2238 } else 2239 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); 2240 2241 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 2242 2243 return err; 2244 } 2245 2246 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack) 2247 { 2248 struct drbd_peer_request *peer_req = 2249 container_of(w, struct drbd_peer_request, w); 2250 struct drbd_peer_device *peer_device = peer_req->peer_device; 2251 int err; 2252 2253 err = drbd_send_ack(peer_device, ack, peer_req); 2254 dec_unacked(peer_device->device); 2255 2256 return err; 2257 } 2258 2259 static int e_send_superseded(struct drbd_work *w, int unused) 2260 { 2261 return e_send_ack(w, P_SUPERSEDED); 2262 } 2263 2264 static int e_send_retry_write(struct drbd_work *w, int unused) 2265 { 2266 struct drbd_peer_request *peer_req = 2267 container_of(w, struct drbd_peer_request, w); 2268 struct drbd_connection *connection = peer_req->peer_device->connection; 2269 2270 return e_send_ack(w, connection->agreed_pro_version >= 100 ? 2271 P_RETRY_WRITE : P_SUPERSEDED); 2272 } 2273 2274 static bool seq_greater(u32 a, u32 b) 2275 { 2276 /* 2277 * We assume 32-bit wrap-around here. 2278 * For 24-bit wrap-around, we would have to shift: 2279 * a <<= 8; b <<= 8; 2280 */ 2281 return (s32)a - (s32)b > 0; 2282 } 2283 2284 static u32 seq_max(u32 a, u32 b) 2285 { 2286 return seq_greater(a, b) ? a : b; 2287 } 2288 2289 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq) 2290 { 2291 struct drbd_device *device = peer_device->device; 2292 unsigned int newest_peer_seq; 2293 2294 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) { 2295 spin_lock(&device->peer_seq_lock); 2296 newest_peer_seq = seq_max(device->peer_seq, peer_seq); 2297 device->peer_seq = newest_peer_seq; 2298 spin_unlock(&device->peer_seq_lock); 2299 /* wake up only if we actually changed device->peer_seq */ 2300 if (peer_seq == newest_peer_seq) 2301 wake_up(&device->seq_wait); 2302 } 2303 } 2304 2305 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) 2306 { 2307 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9))); 2308 } 2309 2310 /* maybe change sync_ee into interval trees as well? */ 2311 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) 2312 { 2313 struct drbd_peer_request *rs_req; 2314 bool rv = false; 2315 2316 spin_lock_irq(&device->resource->req_lock); 2317 list_for_each_entry(rs_req, &device->sync_ee, w.list) { 2318 if (overlaps(peer_req->i.sector, peer_req->i.size, 2319 rs_req->i.sector, rs_req->i.size)) { 2320 rv = true; 2321 break; 2322 } 2323 } 2324 spin_unlock_irq(&device->resource->req_lock); 2325 2326 return rv; 2327 } 2328 2329 /* Called from receive_Data. 2330 * Synchronize packets on sock with packets on msock. 2331 * 2332 * This is here so even when a P_DATA packet traveling via sock overtook an Ack 2333 * packet traveling on msock, they are still processed in the order they have 2334 * been sent. 2335 * 2336 * Note: we don't care for Ack packets overtaking P_DATA packets. 
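 * For illustration: receiving P_DATA with seq_num 42 while
 * device->peer_seq is still 40 means packet 41 has not been processed
 * yet; the loop below sleeps on seq_wait until peer_seq reaches 41.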
2337 * 2338 * In case packet_seq is larger than device->peer_seq number, there are 2339 * outstanding packets on the msock. We wait for them to arrive. 2340 * In case we are the logically next packet, we update device->peer_seq 2341 * ourselves. Correctly handles 32bit wrap around. 2342 * 2343 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, 2344 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds 2345 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have 2346 * 1<<9 == 512 seconds aka ages for the 32bit wrap around... 2347 * 2348 * returns 0 if we may process the packet, 2349 * -ERESTARTSYS if we were interrupted (by disconnect signal). */ 2350 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq) 2351 { 2352 struct drbd_device *device = peer_device->device; 2353 DEFINE_WAIT(wait); 2354 long timeout; 2355 int ret = 0, tp; 2356 2357 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) 2358 return 0; 2359 2360 spin_lock(&device->peer_seq_lock); 2361 for (;;) { 2362 if (!seq_greater(peer_seq - 1, device->peer_seq)) { 2363 device->peer_seq = seq_max(device->peer_seq, peer_seq); 2364 break; 2365 } 2366 2367 if (signal_pending(current)) { 2368 ret = -ERESTARTSYS; 2369 break; 2370 } 2371 2372 rcu_read_lock(); 2373 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries; 2374 rcu_read_unlock(); 2375 2376 if (!tp) 2377 break; 2378 2379 /* Only need to wait if two_primaries is enabled */ 2380 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); 2381 spin_unlock(&device->peer_seq_lock); 2382 rcu_read_lock(); 2383 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10; 2384 rcu_read_unlock(); 2385 timeout = schedule_timeout(timeout); 2386 spin_lock(&device->peer_seq_lock); 2387 if (!timeout) { 2388 ret = -ETIMEDOUT; 2389 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n"); 2390 break; 2391 } 2392 } 2393 spin_unlock(&device->peer_seq_lock); 2394 finish_wait(&device->seq_wait, &wait); 2395 return ret; 2396 } 2397 2398 static enum req_op wire_flags_to_bio_op(u32 dpf) 2399 { 2400 if (dpf & DP_ZEROES) 2401 return REQ_OP_WRITE_ZEROES; 2402 if (dpf & DP_DISCARD) 2403 return REQ_OP_DISCARD; 2404 else 2405 return REQ_OP_WRITE; 2406 } 2407 2408 /* see also bio_flags_to_wire() */ 2409 static blk_opf_t wire_flags_to_bio(struct drbd_connection *connection, u32 dpf) 2410 { 2411 return wire_flags_to_bio_op(dpf) | 2412 (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 2413 (dpf & DP_FUA ? REQ_FUA : 0) | 2414 (dpf & DP_FLUSH ? 
REQ_PREFLUSH : 0); 2415 } 2416 2417 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, 2418 unsigned int size) 2419 { 2420 struct drbd_interval *i; 2421 2422 repeat: 2423 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2424 struct drbd_request *req; 2425 struct bio_and_error m; 2426 2427 if (!i->local) 2428 continue; 2429 req = container_of(i, struct drbd_request, i); 2430 if (!(req->rq_state & RQ_POSTPONED)) 2431 continue; 2432 req->rq_state &= ~RQ_POSTPONED; 2433 __req_mod(req, NEG_ACKED, &m); 2434 spin_unlock_irq(&device->resource->req_lock); 2435 if (m.bio) 2436 complete_master_bio(device, &m); 2437 spin_lock_irq(&device->resource->req_lock); 2438 goto repeat; 2439 } 2440 } 2441 2442 static int handle_write_conflicts(struct drbd_device *device, 2443 struct drbd_peer_request *peer_req) 2444 { 2445 struct drbd_connection *connection = peer_req->peer_device->connection; 2446 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags); 2447 sector_t sector = peer_req->i.sector; 2448 const unsigned int size = peer_req->i.size; 2449 struct drbd_interval *i; 2450 bool equal; 2451 int err; 2452 2453 /* 2454 * Inserting the peer request into the write_requests tree will prevent 2455 * new conflicting local requests from being added. 2456 */ 2457 drbd_insert_interval(&device->write_requests, &peer_req->i); 2458 2459 repeat: 2460 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2461 if (i == &peer_req->i) 2462 continue; 2463 if (i->completed) 2464 continue; 2465 2466 if (!i->local) { 2467 /* 2468 * Our peer has sent a conflicting remote request; this 2469 * should not happen in a two-node setup. Wait for the 2470 * earlier peer request to complete. 2471 */ 2472 err = drbd_wait_misc(device, i); 2473 if (err) 2474 goto out; 2475 goto repeat; 2476 } 2477 2478 equal = i->sector == sector && i->size == size; 2479 if (resolve_conflicts) { 2480 /* 2481 * If the peer request is fully contained within the 2482 * overlapping request, it can be considered overwritten 2483 * and thus superseded; otherwise, it will be retried 2484 * once all overlapping requests have completed. 2485 */ 2486 bool superseded = i->sector <= sector && i->sector + 2487 (i->size >> 9) >= sector + (size >> 9); 2488 2489 if (!equal) 2490 drbd_alert(device, "Concurrent writes detected: " 2491 "local=%llus +%u, remote=%llus +%u, " 2492 "assuming %s came first\n", 2493 (unsigned long long)i->sector, i->size, 2494 (unsigned long long)sector, size, 2495 superseded ? "local" : "remote"); 2496 2497 peer_req->w.cb = superseded ? e_send_superseded : 2498 e_send_retry_write; 2499 list_add_tail(&peer_req->w.list, &device->done_ee); 2500 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); 2501 2502 err = -ENOENT; 2503 goto out; 2504 } else { 2505 struct drbd_request *req = 2506 container_of(i, struct drbd_request, i); 2507 2508 if (!equal) 2509 drbd_alert(device, "Concurrent writes detected: " 2510 "local=%llus +%u, remote=%llus +%u\n", 2511 (unsigned long long)i->sector, i->size, 2512 (unsigned long long)sector, size); 2513 2514 if (req->rq_state & RQ_LOCAL_PENDING || 2515 !(req->rq_state & RQ_POSTPONED)) { 2516 /* 2517 * Wait for the node with the discard flag to 2518 * decide if this request has been superseded 2519 * or needs to be retried. 2520 * Requests that have been superseded will 2521 * disappear from the write_requests tree. 
2522 * 2523 * In addition, wait for the conflicting 2524 * request to finish locally before submitting 2525 * the conflicting peer request. 2526 */ 2527 err = drbd_wait_misc(device, &req->i); 2528 if (err) { 2529 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD); 2530 fail_postponed_requests(device, sector, size); 2531 goto out; 2532 } 2533 goto repeat; 2534 } 2535 /* 2536 * Remember to restart the conflicting requests after 2537 * the new peer request has completed. 2538 */ 2539 peer_req->flags |= EE_RESTART_REQUESTS; 2540 } 2541 } 2542 err = 0; 2543 2544 out: 2545 if (err) 2546 drbd_remove_epoch_entry_interval(device, peer_req); 2547 return err; 2548 } 2549 2550 /* mirrored write */ 2551 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi) 2552 { 2553 struct drbd_peer_device *peer_device; 2554 struct drbd_device *device; 2555 struct net_conf *nc; 2556 sector_t sector; 2557 struct drbd_peer_request *peer_req; 2558 struct p_data *p = pi->data; 2559 u32 peer_seq = be32_to_cpu(p->seq_num); 2560 u32 dp_flags; 2561 int err, tp; 2562 2563 peer_device = conn_peer_device(connection, pi->vnr); 2564 if (!peer_device) 2565 return -EIO; 2566 device = peer_device->device; 2567 2568 if (!get_ldev(device)) { 2569 int err2; 2570 2571 err = wait_for_and_update_peer_seq(peer_device, peer_seq); 2572 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); 2573 atomic_inc(&connection->current_epoch->epoch_size); 2574 err2 = drbd_drain_block(peer_device, pi->size); 2575 if (!err) 2576 err = err2; 2577 return err; 2578 } 2579 2580 /* 2581 * Corresponding put_ldev done either below (on various errors), or in 2582 * drbd_peer_request_endio, if we successfully submit the data at the 2583 * end of this function. 2584 */ 2585 2586 sector = be64_to_cpu(p->sector); 2587 peer_req = read_in_block(peer_device, p->block_id, sector, pi); 2588 if (!peer_req) { 2589 put_ldev(device); 2590 return -EIO; 2591 } 2592 2593 peer_req->w.cb = e_end_block; 2594 peer_req->submit_jif = jiffies; 2595 peer_req->flags |= EE_APPLICATION; 2596 2597 dp_flags = be32_to_cpu(p->dp_flags); 2598 peer_req->opf = wire_flags_to_bio(connection, dp_flags); 2599 if (pi->cmd == P_TRIM) { 2600 D_ASSERT(peer_device, peer_req->i.size > 0); 2601 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_DISCARD); 2602 D_ASSERT(peer_device, peer_req->pages == NULL); 2603 /* need to play safe: an older DRBD sender 2604 * may mean zero-out while sending P_TRIM. */ 2605 if (0 == (connection->agreed_features & DRBD_FF_WZEROES)) 2606 peer_req->flags |= EE_ZEROOUT; 2607 } else if (pi->cmd == P_ZEROES) { 2608 D_ASSERT(peer_device, peer_req->i.size > 0); 2609 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_WRITE_ZEROES); 2610 D_ASSERT(peer_device, peer_req->pages == NULL); 2611 /* Do (not) pass down BLKDEV_ZERO_NOUNMAP? 
*/
2612 if (dp_flags & DP_DISCARD)
2613 peer_req->flags |= EE_TRIM;
2614 } else if (peer_req->pages == NULL) {
2615 D_ASSERT(device, peer_req->i.size == 0);
2616 D_ASSERT(device, dp_flags & DP_FLUSH);
2617 }
2618
2619 if (dp_flags & DP_MAY_SET_IN_SYNC)
2620 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2621
2622 spin_lock(&connection->epoch_lock);
2623 peer_req->epoch = connection->current_epoch;
2624 atomic_inc(&peer_req->epoch->epoch_size);
2625 atomic_inc(&peer_req->epoch->active);
2626 spin_unlock(&connection->epoch_lock);
2627
2628 rcu_read_lock();
2629 nc = rcu_dereference(peer_device->connection->net_conf);
2630 tp = nc->two_primaries;
2631 if (peer_device->connection->agreed_pro_version < 100) {
2632 switch (nc->wire_protocol) {
2633 case DRBD_PROT_C:
2634 dp_flags |= DP_SEND_WRITE_ACK;
2635 break;
2636 case DRBD_PROT_B:
2637 dp_flags |= DP_SEND_RECEIVE_ACK;
2638 break;
2639 }
2640 }
2641 rcu_read_unlock();
2642
2643 if (dp_flags & DP_SEND_WRITE_ACK) {
2644 peer_req->flags |= EE_SEND_WRITE_ACK;
2645 inc_unacked(device);
2646 /* corresponding dec_unacked() in e_end_block(),
2647 * respectively in _drbd_clear_done_ee */
2648 }
2649
2650 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2651 /* I really don't like it that the receiver thread
2652 * sends on the msock, but anyways */
2653 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2654 }
2655
2656 if (tp) {
2657 /* two primaries implies protocol C */
2658 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2659 peer_req->flags |= EE_IN_INTERVAL_TREE;
2660 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2661 if (err)
2662 goto out_interrupted;
2663 spin_lock_irq(&device->resource->req_lock);
2664 err = handle_write_conflicts(device, peer_req);
2665 if (err) {
2666 spin_unlock_irq(&device->resource->req_lock);
2667 if (err == -ENOENT) {
2668 put_ldev(device);
2669 return 0;
2670 }
2671 goto out_interrupted;
2672 }
2673 } else {
2674 update_peer_seq(peer_device, peer_seq);
2675 spin_lock_irq(&device->resource->req_lock);
2676 }
2677 /* TRIM and ZEROOUT are processed synchronously:
2678 * we wait for all pending requests, respectively wait for
2679 * active_ee to become empty in drbd_submit_peer_request();
2680 * better not add ourselves here.
*/
2681 if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
2682 list_add_tail(&peer_req->w.list, &device->active_ee);
2683 spin_unlock_irq(&device->resource->req_lock);
2684
2685 if (device->state.conn == C_SYNC_TARGET)
2686 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2687
2688 if (device->state.pdsk < D_INCONSISTENT) {
2689 /* In case we have the only disk of the cluster, remember this block as out of sync (for a later resync of the peer) and cover it with the activity log. */
2690 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2691 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2692 drbd_al_begin_io(device, &peer_req->i);
2693 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2694 }
2695
2696 err = drbd_submit_peer_request(peer_req);
2697 if (!err)
2698 return 0;
2699
2700 /* don't care for the reason here */
2701 drbd_err(device, "submit failed, triggering re-connect\n");
2702 spin_lock_irq(&device->resource->req_lock);
2703 list_del(&peer_req->w.list);
2704 drbd_remove_epoch_entry_interval(device, peer_req);
2705 spin_unlock_irq(&device->resource->req_lock);
2706 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2707 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2708 drbd_al_complete_io(device, &peer_req->i);
2709 }
2710
2711 out_interrupted:
2712 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2713 put_ldev(device);
2714 drbd_free_peer_req(device, peer_req);
2715 return err;
2716 }
2717
2718 /* We may throttle resync, if the lower device seems to be busy,
2719 * and current sync rate is above c_min_rate.
2720 *
2721 * To decide whether or not the lower device is busy, we use a scheme similar
2722 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2723 * activity (more than 64 sectors) that we cannot account for with our own
2724 * resync activity, it obviously is "busy".
2725 *
2726 * The current sync rate used here uses only the most recent two step marks,
2727 * to have a short time average so we can react faster.
2728 */
2729 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2730 bool throttle_if_app_is_waiting)
2731 {
2732 struct lc_element *tmp;
2733 bool throttle = drbd_rs_c_min_rate_throttle(device);
2734
2735 if (!throttle || throttle_if_app_is_waiting)
2736 return throttle;
2737
2738 spin_lock_irq(&device->al_lock);
2739 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2740 if (tmp) {
2741 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2742 if (test_bit(BME_PRIORITY, &bm_ext->flags))
2743 throttle = false;
2744 /* Do not slow down if app IO is already waiting for this extent,
2745 * and our progress is necessary for application IO to complete. */
2746 }
2747 spin_unlock_irq(&device->al_lock);
2748
2749 return throttle;
2750 }
2751
2752 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2753 {
2754 struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
2755 unsigned long db, dt, dbdt;
2756 unsigned int c_min_rate;
2757 int curr_events;
2758
2759 rcu_read_lock();
2760 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2761 rcu_read_unlock();
2762
2763 /* feature disabled? */
2764 if (c_min_rate == 0)
2765 return false;
2766
2767 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
2768 atomic_read(&device->rs_sect_ev);
2769
2770 if (atomic_read(&device->ap_actlog_cnt)
2771 || curr_events - device->rs_last_events > 64) {
2772 unsigned long rs_left;
2773 int i;
2774
2775 device->rs_last_events = curr_events;
2776
2777 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2778 * approx.
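 * For illustration: db = rs_mark_left[i] - rs_left is the number of
 * bitmap bits (4 KiB each) synced since mark i, dt the elapsed seconds,
 * and dbdt = Bit2KB(db/dt) the resulting rate in KiB/s; e.g. 2048 bits
 * resynced in 4 seconds yield 2048 KiB/s, throttling if c_min_rate is
 * lower than that.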
*/
2779 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2780
2781 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2782 rs_left = device->ov_left;
2783 else
2784 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2785
2786 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2787 if (!dt)
2788 dt++;
2789 db = device->rs_mark_left[i] - rs_left;
2790 dbdt = Bit2KB(db/dt);
2791
2792 if (dbdt > c_min_rate)
2793 return true;
2794 }
2795 return false;
2796 }
2797
2798 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2799 {
2800 struct drbd_peer_device *peer_device;
2801 struct drbd_device *device;
2802 sector_t sector;
2803 sector_t capacity;
2804 struct drbd_peer_request *peer_req;
2805 struct digest_info *di = NULL;
2806 int size, verb;
2807 struct p_block_req *p = pi->data;
2808
2809 peer_device = conn_peer_device(connection, pi->vnr);
2810 if (!peer_device)
2811 return -EIO;
2812 device = peer_device->device;
2813 capacity = get_capacity(device->vdisk);
2814
2815 sector = be64_to_cpu(p->sector);
2816 size = be32_to_cpu(p->blksize);
2817
2818 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2819 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2820 (unsigned long long)sector, size);
2821 return -EINVAL;
2822 }
2823 if (sector + (size>>9) > capacity) {
2824 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2825 (unsigned long long)sector, size);
2826 return -EINVAL;
2827 }
2828
2829 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2830 verb = 1;
2831 switch (pi->cmd) {
2832 case P_DATA_REQUEST:
2833 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2834 break;
2835 case P_RS_THIN_REQ:
2836 case P_RS_DATA_REQUEST:
2837 case P_CSUM_RS_REQUEST:
2838 case P_OV_REQUEST:
2839 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
2840 break;
2841 case P_OV_REPLY:
2842 verb = 0;
2843 dec_rs_pending(device);
2844 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2845 break;
2846 default:
2847 BUG();
2848 }
2849 if (verb && drbd_ratelimit())
2850 drbd_err(device, "Can not satisfy peer's read request, "
2851 "no local data.\n");
2852
2853 /* drain the payload, if any */
2854 return drbd_drain_block(peer_device, pi->size);
2855 }
2856
2857 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2858 * "criss-cross" setup, that might cause write-out on some other DRBD,
2859 * which in turn might block on the other node at this very place. */
2860 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2861 size, GFP_NOIO);
2862 if (!peer_req) {
2863 put_ldev(device);
2864 return -ENOMEM;
2865 }
2866 peer_req->opf = REQ_OP_READ;
2867
2868 switch (pi->cmd) {
2869 case P_DATA_REQUEST:
2870 peer_req->w.cb = w_e_end_data_req;
2871 /* application IO, don't drbd_rs_begin_io */
2872 peer_req->flags |= EE_APPLICATION;
2873 goto submit;
2874
2875 case P_RS_THIN_REQ:
2876 /* If at some point in the future we have a smart way to
2877 find out if this data block is completely deallocated,
2878 then we would do something smarter here than reading
2879 the block...
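   As is, we read the block; the EE_RS_THIN_REQ flag set below lets the
   reply path (w_e_end_rsdata_req) report it as deallocated instead of
   sending data when it reads back as all zeroes.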
*/
2880 peer_req->flags |= EE_RS_THIN_REQ;
2881 fallthrough;
2882 case P_RS_DATA_REQUEST:
2883 peer_req->w.cb = w_e_end_rsdata_req;
2884 /* used in the sector offset progress display */
2885 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2886 break;
2887
2888 case P_OV_REPLY:
2889 case P_CSUM_RS_REQUEST:
2890 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2891 if (!di)
2892 goto out_free_e;
2893
2894 di->digest_size = pi->size;
2895 di->digest = (((char *)di)+sizeof(struct digest_info));
2896
2897 peer_req->digest = di;
2898 peer_req->flags |= EE_HAS_DIGEST;
2899
2900 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
2901 goto out_free_e;
2902
2903 if (pi->cmd == P_CSUM_RS_REQUEST) {
2904 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2905 peer_req->w.cb = w_e_end_csum_rs_req;
2906 /* used in the sector offset progress display */
2907 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2908 /* remember to report stats in drbd_resync_finished */
2909 device->use_csums = true;
2910 } else if (pi->cmd == P_OV_REPLY) {
2911 /* track progress, we may need to throttle */
2912 atomic_add(size >> 9, &device->rs_sect_in);
2913 peer_req->w.cb = w_e_end_ov_reply;
2914 dec_rs_pending(device);
2915 /* drbd_rs_begin_io done when we sent this request,
2916 * but accounting still needs to be done. */
2917 goto submit_for_resync;
2918 }
2919 break;
2920
2921 case P_OV_REQUEST:
2922 if (device->ov_start_sector == ~(sector_t)0 &&
2923 peer_device->connection->agreed_pro_version >= 90) {
2924 unsigned long now = jiffies;
2925 int i;
2926 device->ov_start_sector = sector;
2927 device->ov_position = sector;
2928 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2929 device->rs_total = device->ov_left;
2930 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2931 device->rs_mark_left[i] = device->ov_left;
2932 device->rs_mark_time[i] = now;
2933 }
2934 drbd_info(device, "Online Verify start sector: %llu\n",
2935 (unsigned long long)sector);
2936 }
2937 peer_req->w.cb = w_e_end_ov_req;
2938 break;
2939
2940 default:
2941 BUG();
2942 }
2943
2944 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2945 * wrt the receiver, but it is not as straightforward as it may seem.
2946 * Various places in the resync start and stop logic assume resync
2947 * requests are processed in order; requeuing this on the worker thread
2948 * introduces a bunch of new code for synchronization between threads.
2949 *
2950 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2951 * "forever", throttling after drbd_rs_begin_io will lock that extent
2952 * for application writes for the same time. For now, just throttle
2953 * here, where the rest of the code expects the receiver to sleep for
2954 * a while, anyways.
2955 */
2956
2957 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2958 * this defers syncer requests for some time, before letting at least
2959 * one request through. The resync controller on the receiving side
2960 * will adapt to the incoming rate accordingly.
2961 *
2962 * We cannot throttle here if remote is Primary/SyncTarget:
2963 * we would also throttle its application reads.
2964 * In that case, throttling is done on the SyncTarget only.
2965 */
2966
2967 /* Even though this may be a resync request, we do add to "read_ee";
2968 * "sync_ee" is only used for resync WRITEs.
2969 * Add to list early, so debugfs can find this request
2970 * even if we have to sleep below.
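 * (The throttle sleep below is schedule_timeout_uninterruptible(HZ/10),
 * i.e. 100 ms per step, and is skipped while the peer is Primary.)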
*/ 2971 spin_lock_irq(&device->resource->req_lock); 2972 list_add_tail(&peer_req->w.list, &device->read_ee); 2973 spin_unlock_irq(&device->resource->req_lock); 2974 2975 update_receiver_timing_details(connection, drbd_rs_should_slow_down); 2976 if (device->state.peer != R_PRIMARY 2977 && drbd_rs_should_slow_down(device, sector, false)) 2978 schedule_timeout_uninterruptible(HZ/10); 2979 update_receiver_timing_details(connection, drbd_rs_begin_io); 2980 if (drbd_rs_begin_io(device, sector)) 2981 goto out_free_e; 2982 2983 submit_for_resync: 2984 atomic_add(size >> 9, &device->rs_sect_ev); 2985 2986 submit: 2987 update_receiver_timing_details(connection, drbd_submit_peer_request); 2988 inc_unacked(device); 2989 if (drbd_submit_peer_request(peer_req) == 0) 2990 return 0; 2991 2992 /* don't care for the reason here */ 2993 drbd_err(device, "submit failed, triggering re-connect\n"); 2994 2995 out_free_e: 2996 spin_lock_irq(&device->resource->req_lock); 2997 list_del(&peer_req->w.list); 2998 spin_unlock_irq(&device->resource->req_lock); 2999 /* no drbd_rs_complete_io(), we are dropping the connection anyways */ 3000 3001 put_ldev(device); 3002 drbd_free_peer_req(device, peer_req); 3003 return -EIO; 3004 } 3005 3006 /* 3007 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries 3008 */ 3009 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local) 3010 { 3011 struct drbd_device *device = peer_device->device; 3012 int self, peer, rv = -100; 3013 unsigned long ch_self, ch_peer; 3014 enum drbd_after_sb_p after_sb_0p; 3015 3016 self = device->ldev->md.uuid[UI_BITMAP] & 1; 3017 peer = device->p_uuid[UI_BITMAP] & 1; 3018 3019 ch_peer = device->p_uuid[UI_SIZE]; 3020 ch_self = device->comm_bm_set; 3021 3022 rcu_read_lock(); 3023 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p; 3024 rcu_read_unlock(); 3025 switch (after_sb_0p) { 3026 case ASB_CONSENSUS: 3027 case ASB_DISCARD_SECONDARY: 3028 case ASB_CALL_HELPER: 3029 case ASB_VIOLENTLY: 3030 drbd_err(device, "Configuration error.\n"); 3031 break; 3032 case ASB_DISCONNECT: 3033 break; 3034 case ASB_DISCARD_YOUNGER_PRI: 3035 if (self == 0 && peer == 1) { 3036 rv = -1; 3037 break; 3038 } 3039 if (self == 1 && peer == 0) { 3040 rv = 1; 3041 break; 3042 } 3043 fallthrough; /* to one of the other strategies */ 3044 case ASB_DISCARD_OLDER_PRI: 3045 if (self == 0 && peer == 1) { 3046 rv = 1; 3047 break; 3048 } 3049 if (self == 1 && peer == 0) { 3050 rv = -1; 3051 break; 3052 } 3053 /* Else fall through to one of the other strategies... */ 3054 drbd_warn(device, "Discard younger/older primary did not find a decision\n" 3055 "Using discard-least-changes instead\n"); 3056 fallthrough; 3057 case ASB_DISCARD_ZERO_CHG: 3058 if (ch_peer == 0 && ch_self == 0) { 3059 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) 3060 ? -1 : 1; 3061 break; 3062 } else { 3063 if (ch_peer == 0) { rv = 1; break; } 3064 if (ch_self == 0) { rv = -1; break; } 3065 } 3066 if (after_sb_0p == ASB_DISCARD_ZERO_CHG) 3067 break; 3068 fallthrough; 3069 case ASB_DISCARD_LEAST_CHG: 3070 if (ch_self < ch_peer) 3071 rv = -1; 3072 else if (ch_self > ch_peer) 3073 rv = 1; 3074 else /* ( ch_self == ch_peer ) */ 3075 /* Well, then use something else. */ 3076 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) 3077 ? 
-1 : 1; 3078 break; 3079 case ASB_DISCARD_LOCAL: 3080 rv = -1; 3081 break; 3082 case ASB_DISCARD_REMOTE: 3083 rv = 1; 3084 } 3085 3086 return rv; 3087 } 3088 3089 /* 3090 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary 3091 */ 3092 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local) 3093 { 3094 struct drbd_device *device = peer_device->device; 3095 int hg, rv = -100; 3096 enum drbd_after_sb_p after_sb_1p; 3097 3098 rcu_read_lock(); 3099 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p; 3100 rcu_read_unlock(); 3101 switch (after_sb_1p) { 3102 case ASB_DISCARD_YOUNGER_PRI: 3103 case ASB_DISCARD_OLDER_PRI: 3104 case ASB_DISCARD_LEAST_CHG: 3105 case ASB_DISCARD_LOCAL: 3106 case ASB_DISCARD_REMOTE: 3107 case ASB_DISCARD_ZERO_CHG: 3108 drbd_err(device, "Configuration error.\n"); 3109 break; 3110 case ASB_DISCONNECT: 3111 break; 3112 case ASB_CONSENSUS: 3113 hg = drbd_asb_recover_0p(peer_device); 3114 if (hg == -1 && device->state.role == R_SECONDARY) 3115 rv = hg; 3116 if (hg == 1 && device->state.role == R_PRIMARY) 3117 rv = hg; 3118 break; 3119 case ASB_VIOLENTLY: 3120 rv = drbd_asb_recover_0p(peer_device); 3121 break; 3122 case ASB_DISCARD_SECONDARY: 3123 return device->state.role == R_PRIMARY ? 1 : -1; 3124 case ASB_CALL_HELPER: 3125 hg = drbd_asb_recover_0p(peer_device); 3126 if (hg == -1 && device->state.role == R_PRIMARY) { 3127 enum drbd_state_rv rv2; 3128 3129 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 3130 * we might be here in C_WF_REPORT_PARAMS which is transient. 3131 * we do not need to wait for the after state change work either. */ 3132 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); 3133 if (rv2 != SS_SUCCESS) { 3134 drbd_khelper(device, "pri-lost-after-sb"); 3135 } else { 3136 drbd_warn(device, "Successfully gave up primary role.\n"); 3137 rv = hg; 3138 } 3139 } else 3140 rv = hg; 3141 } 3142 3143 return rv; 3144 } 3145 3146 /* 3147 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries 3148 */ 3149 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local) 3150 { 3151 struct drbd_device *device = peer_device->device; 3152 int hg, rv = -100; 3153 enum drbd_after_sb_p after_sb_2p; 3154 3155 rcu_read_lock(); 3156 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p; 3157 rcu_read_unlock(); 3158 switch (after_sb_2p) { 3159 case ASB_DISCARD_YOUNGER_PRI: 3160 case ASB_DISCARD_OLDER_PRI: 3161 case ASB_DISCARD_LEAST_CHG: 3162 case ASB_DISCARD_LOCAL: 3163 case ASB_DISCARD_REMOTE: 3164 case ASB_CONSENSUS: 3165 case ASB_DISCARD_SECONDARY: 3166 case ASB_DISCARD_ZERO_CHG: 3167 drbd_err(device, "Configuration error.\n"); 3168 break; 3169 case ASB_VIOLENTLY: 3170 rv = drbd_asb_recover_0p(peer_device); 3171 break; 3172 case ASB_DISCONNECT: 3173 break; 3174 case ASB_CALL_HELPER: 3175 hg = drbd_asb_recover_0p(peer_device); 3176 if (hg == -1) { 3177 enum drbd_state_rv rv2; 3178 3179 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 3180 * we might be here in C_WF_REPORT_PARAMS which is transient. 3181 * we do not need to wait for the after state change work either. 
*/ 3182 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); 3183 if (rv2 != SS_SUCCESS) { 3184 drbd_khelper(device, "pri-lost-after-sb"); 3185 } else { 3186 drbd_warn(device, "Successfully gave up primary role.\n"); 3187 rv = hg; 3188 } 3189 } else 3190 rv = hg; 3191 } 3192 3193 return rv; 3194 } 3195 3196 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, 3197 u64 bits, u64 flags) 3198 { 3199 if (!uuid) { 3200 drbd_info(device, "%s uuid info vanished while I was looking!\n", text); 3201 return; 3202 } 3203 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 3204 text, 3205 (unsigned long long)uuid[UI_CURRENT], 3206 (unsigned long long)uuid[UI_BITMAP], 3207 (unsigned long long)uuid[UI_HISTORY_START], 3208 (unsigned long long)uuid[UI_HISTORY_END], 3209 (unsigned long long)bits, 3210 (unsigned long long)flags); 3211 } 3212 3213 /* 3214 100 after split brain try auto recover 3215 2 C_SYNC_SOURCE set BitMap 3216 1 C_SYNC_SOURCE use BitMap 3217 0 no Sync 3218 -1 C_SYNC_TARGET use BitMap 3219 -2 C_SYNC_TARGET set BitMap 3220 -100 after split brain, disconnect 3221 -1000 unrelated data 3222 -1091 requires proto 91 3223 -1096 requires proto 96 3224 */ 3225 3226 static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local) 3227 { 3228 struct drbd_peer_device *const peer_device = first_peer_device(device); 3229 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; 3230 u64 self, peer; 3231 int i, j; 3232 3233 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 3234 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); 3235 3236 *rule_nr = 10; 3237 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 3238 return 0; 3239 3240 *rule_nr = 20; 3241 if ((self == UUID_JUST_CREATED || self == (u64)0) && 3242 peer != UUID_JUST_CREATED) 3243 return -2; 3244 3245 *rule_nr = 30; 3246 if (self != UUID_JUST_CREATED && 3247 (peer == UUID_JUST_CREATED || peer == (u64)0)) 3248 return 2; 3249 3250 if (self == peer) { 3251 int rct, dc; /* roles at crash time */ 3252 3253 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { 3254 3255 if (connection->agreed_pro_version < 91) 3256 return -1091; 3257 3258 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 3259 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 3260 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n"); 3261 drbd_uuid_move_history(device); 3262 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; 3263 device->ldev->md.uuid[UI_BITMAP] = 0; 3264 3265 drbd_uuid_dump(device, "self", device->ldev->md.uuid, 3266 device->state.disk >= D_NEGOTIATING ? 
drbd_bm_total_weight(device) : 0, 0);
3267 *rule_nr = 34;
3268 } else {
3269 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3270 *rule_nr = 36;
3271 }
3272
3273 return 1;
3274 }
3275
3276 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3277
3278 if (connection->agreed_pro_version < 91)
3279 return -1091;
3280
3281 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3282 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3283 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3284
3285 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3286 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3287 device->p_uuid[UI_BITMAP] = 0UL;
3288
3289 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3290 *rule_nr = 35;
3291 } else {
3292 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3293 *rule_nr = 37;
3294 }
3295
3296 return -1;
3297 }
3298
3299 /* Common power [off|failure] */
3300 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3301 (device->p_uuid[UI_FLAGS] & 2);
3302 /* lowest bit is set when we were primary,
3303 * next bit (weight 2) is set when peer was primary */
3304 *rule_nr = 40;
3305
3306 /* Neither has the "crashed primary" flag set,
3307 * only a replication link hiccup. */
3308 if (rct == 0)
3309 return 0;
3310
3311 /* Current UUID equal and no bitmap uuid; does not necessarily
3312 * mean this was a "simultaneous hard crash", maybe IO was
3313 * frozen, so no UUID-bump happened.
3314 * This is a protocol change, overload DRBD_FF_WSAME as flag
3315 * for "new-enough" peer DRBD version. */
3316 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3317 *rule_nr = 41;
3318 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3319 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3320 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3321 }
3322 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3323 /* At least one has the "crashed primary" bit set,
3324 * both are primary now, but neither has rotated its UUIDs?
3325 * "Can not happen." */
3326 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3327 return -100;
3328 }
3329 if (device->state.role == R_PRIMARY)
3330 return 1;
3331 return -1;
3332 }
3333
3334 /* Both are secondary.
3335 * Really looks like recovery from simultaneous hard crash.
3336 * Check which had been primary before, and arbitrate. */
3337 switch (rct) {
3338 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
3339 case 1: /* self_pri && !peer_pri */ return 1;
3340 case 2: /* !self_pri && peer_pri */ return -1;
3341 case 3: /* self_pri && peer_pri */
3342 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
3343 return dc ? -1 : 1;
3344 }
3345 }
3346
3347 *rule_nr = 50;
3348 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3349 if (self == peer)
3350 return -1;
3351
3352 *rule_nr = 51;
3353 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3354 if (self == peer) {
3355 if (connection->agreed_pro_version < 96 ?
3356 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3357 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3358 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3359 /* The last P_SYNC_UUID did not get through. Undo the modifications
3360 the peer made to its UUIDs when it last started a resync as sync source. */
3361
3362 if (connection->agreed_pro_version < 91)
3363 return -1091;
3364
3365 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3366 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3367
3368 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3369 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3370
3371 return -1;
3372 }
3373 }
3374
3375 *rule_nr = 60;
3376 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3377 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3378 peer = device->p_uuid[i] & ~((u64)1);
3379 if (self == peer)
3380 return -2;
3381 }
3382
3383 *rule_nr = 70;
3384 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3385 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3386 if (self == peer)
3387 return 1;
3388
3389 *rule_nr = 71;
3390 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3391 if (self == peer) {
3392 if (connection->agreed_pro_version < 96 ?
3393 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3394 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3395 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3396 /* The last P_SYNC_UUID did not get through. Undo the modifications
3397 we made to our UUIDs when we last started a resync as sync source. */
3398
3399 if (connection->agreed_pro_version < 91)
3400 return -1091;
3401
3402 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3403 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3404
3405 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3406 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3407 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3408
3409 return 1;
3410 }
3411 }
3412
3413
3414 *rule_nr = 80;
3415 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3416 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3417 self = device->ldev->md.uuid[i] & ~((u64)1);
3418 if (self == peer)
3419 return 2;
3420 }
3421
3422 *rule_nr = 90;
3423 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3424 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3425 if (self == peer && self != ((u64)0))
3426 return 100;
3427
3428 *rule_nr = 100;
3429 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3430 self = device->ldev->md.uuid[i] & ~((u64)1);
3431 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
3432 peer = device->p_uuid[j] & ~((u64)1);
3433 if (self == peer)
3434 return -100;
3435 }
3436 }
3437
3438 return -1000;
3439 }
3440
3441 /* drbd_sync_handshake() returns the new conn state on success, or
3442 C_MASK on failure.
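   The verdict hg from drbd_uuid_compare() maps to the result as follows:
     hg > 0   ->  C_WF_BITMAP_S  (we become sync source)
     hg < 0   ->  C_WF_BITMAP_T  (we become sync target)
     hg == 0  ->  C_CONNECTED    (no resync)
   and |hg| >= 2 additionally forces a full sync by setting the whole
   bitmap first.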
3443 */
3444 static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3445 enum drbd_role peer_role,
3446 enum drbd_disk_state peer_disk) __must_hold(local)
3447 {
3448 struct drbd_device *device = peer_device->device;
3449 enum drbd_conns rv = C_MASK;
3450 enum drbd_disk_state mydisk;
3451 struct net_conf *nc;
3452 int hg, rule_nr, rr_conflict, tentative, always_asbp;
3453
3454 mydisk = device->state.disk;
3455 if (mydisk == D_NEGOTIATING)
3456 mydisk = device->new_state_tmp.disk;
3457
3458 drbd_info(device, "drbd_sync_handshake:\n");
3459
3460 spin_lock_irq(&device->ldev->md.uuid_lock);
3461 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3462 drbd_uuid_dump(device, "peer", device->p_uuid,
3463 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3464
3465 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
3466 spin_unlock_irq(&device->ldev->md.uuid_lock);
3467
3468 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
3469
3470 if (hg == -1000) {
3471 drbd_alert(device, "Unrelated data, aborting!\n");
3472 return C_MASK;
3473 }
3474 if (hg < -0x10000) {
3475 int proto, fflags;
3476 hg = -hg;
3477 proto = hg & 0xff;
3478 fflags = (hg >> 8) & 0xff;
3479 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3480 proto, fflags);
3481 return C_MASK;
3482 }
3483 if (hg < -1000) {
3484 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
3485 return C_MASK;
3486 }
3487
3488 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3489 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
3490 int f = (hg == -100) || abs(hg) == 2;
3491 hg = mydisk > D_INCONSISTENT ? 1 : -1;
3492 if (f)
3493 hg = hg*2;
3494 drbd_info(device, "Becoming sync %s due to disk states.\n",
3495 hg > 0 ? "source" : "target");
3496 }
3497
3498 if (abs(hg) == 100)
3499 drbd_khelper(device, "initial-split-brain");
3500
3501 rcu_read_lock();
3502 nc = rcu_dereference(peer_device->connection->net_conf);
3503 always_asbp = nc->always_asbp;
3504 rr_conflict = nc->rr_conflict;
3505 tentative = nc->tentative;
3506 rcu_read_unlock();
3507
3508 if (hg == 100 || (hg == -100 && always_asbp)) {
3509 int pcount = (device->state.role == R_PRIMARY)
3510 + (peer_role == R_PRIMARY);
3511 int forced = (hg == -100);
3512
3513 switch (pcount) {
3514 case 0:
3515 hg = drbd_asb_recover_0p(peer_device);
3516 break;
3517 case 1:
3518 hg = drbd_asb_recover_1p(peer_device);
3519 break;
3520 case 2:
3521 hg = drbd_asb_recover_2p(peer_device);
3522 break;
3523 }
3524 if (abs(hg) < 100) {
3525 drbd_warn(device, "Split-Brain detected, %d primaries, "
3526 "automatically solved. Sync from %s node\n",
3527 pcount, (hg < 0) ? "peer" : "this");
3528 if (forced) {
3529 drbd_warn(device, "Doing a full sync, since"
3530 " UUIDs were ambiguous.\n");
3531 hg = hg*2;
3532 }
3533 }
3534 }
3535
3536 if (hg == -100) {
3537 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3538 hg = -1;
3539 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3540 hg = 1;
3541
3542 if (abs(hg) < 100)
3543 drbd_warn(device, "Split-Brain detected, manually solved. "
3544 "Sync from %s node\n",
3545 (hg < 0) ? "peer" : "this");
3546 }
3547
3548 if (hg == -100) {
3549 /* FIXME this log message is not correct if we end up here
3550 * after an attempted attach on a diskless node.
3551 * We just refuse to attach -- well, we drop the "connection"
3552 * to that disk, in a way... */
3553 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3554 drbd_khelper(device, "split-brain");
3555 return C_MASK;
3556 }
3557
3558 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3559 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3560 return C_MASK;
3561 }
3562
3563 if (hg < 0 && /* by intention we do not use mydisk here. */
3564 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3565 switch (rr_conflict) {
3566 case ASB_CALL_HELPER:
3567 drbd_khelper(device, "pri-lost");
3568 fallthrough;
3569 case ASB_DISCONNECT:
3570 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3571 return C_MASK;
3572 case ASB_VIOLENTLY:
3573 drbd_warn(device, "Becoming SyncTarget, violating the stable-data "
3574 "assumption\n");
3575 }
3576 }
3577
3578 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
3579 if (hg == 0)
3580 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3581 else
3582 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.\n",
3583 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3584 abs(hg) >= 2 ? "full" : "bit-map based");
3585 return C_MASK;
3586 }
3587
3588 if (abs(hg) >= 2) {
3589 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3590 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3591 BM_LOCKED_SET_ALLOWED))
3592 return C_MASK;
3593 }
3594
3595 if (hg > 0) { /* become sync source. */
3596 rv = C_WF_BITMAP_S;
3597 } else if (hg < 0) { /* become sync target */
3598 rv = C_WF_BITMAP_T;
3599 } else {
3600 rv = C_CONNECTED;
3601 if (drbd_bm_total_weight(device)) {
3602 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3603 drbd_bm_total_weight(device));
3604 }
3605 }
3606
3607 return rv;
3608 }
3609
3610 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3611 {
3612 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3613 if (peer == ASB_DISCARD_REMOTE)
3614 return ASB_DISCARD_LOCAL;
3615
3616 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3617 if (peer == ASB_DISCARD_LOCAL)
3618 return ASB_DISCARD_REMOTE;
3619
3620 /* everything else is valid if they are equal on both sides.
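 * For illustration: a peer configured with ASB_DISCARD_REMOTE wants to
 * discard *our* data; that is only consistent with ASB_DISCARD_LOCAL on
 * our side, which is exactly what the comparisons in receive_protocol()
 * check after this conversion.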
*/ 3621 return peer; 3622 } 3623 3624 static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi) 3625 { 3626 struct p_protocol *p = pi->data; 3627 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 3628 int p_proto, p_discard_my_data, p_two_primaries, cf; 3629 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL; 3630 char integrity_alg[SHARED_SECRET_MAX] = ""; 3631 struct crypto_shash *peer_integrity_tfm = NULL; 3632 void *int_dig_in = NULL, *int_dig_vv = NULL; 3633 3634 p_proto = be32_to_cpu(p->protocol); 3635 p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 3636 p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 3637 p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 3638 p_two_primaries = be32_to_cpu(p->two_primaries); 3639 cf = be32_to_cpu(p->conn_flags); 3640 p_discard_my_data = cf & CF_DISCARD_MY_DATA; 3641 3642 if (connection->agreed_pro_version >= 87) { 3643 int err; 3644 3645 if (pi->size > sizeof(integrity_alg)) 3646 return -EIO; 3647 err = drbd_recv_all(connection, integrity_alg, pi->size); 3648 if (err) 3649 return err; 3650 integrity_alg[SHARED_SECRET_MAX - 1] = 0; 3651 } 3652 3653 if (pi->cmd != P_PROTOCOL_UPDATE) { 3654 clear_bit(CONN_DRY_RUN, &connection->flags); 3655 3656 if (cf & CF_DRY_RUN) 3657 set_bit(CONN_DRY_RUN, &connection->flags); 3658 3659 rcu_read_lock(); 3660 nc = rcu_dereference(connection->net_conf); 3661 3662 if (p_proto != nc->wire_protocol) { 3663 drbd_err(connection, "incompatible %s settings\n", "protocol"); 3664 goto disconnect_rcu_unlock; 3665 } 3666 3667 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) { 3668 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri"); 3669 goto disconnect_rcu_unlock; 3670 } 3671 3672 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) { 3673 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri"); 3674 goto disconnect_rcu_unlock; 3675 } 3676 3677 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) { 3678 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri"); 3679 goto disconnect_rcu_unlock; 3680 } 3681 3682 if (p_discard_my_data && nc->discard_my_data) { 3683 drbd_err(connection, "incompatible %s settings\n", "discard-my-data"); 3684 goto disconnect_rcu_unlock; 3685 } 3686 3687 if (p_two_primaries != nc->two_primaries) { 3688 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries"); 3689 goto disconnect_rcu_unlock; 3690 } 3691 3692 if (strcmp(integrity_alg, nc->integrity_alg)) { 3693 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg"); 3694 goto disconnect_rcu_unlock; 3695 } 3696 3697 rcu_read_unlock(); 3698 } 3699 3700 if (integrity_alg[0]) { 3701 int hash_size; 3702 3703 /* 3704 * We can only change the peer data integrity algorithm 3705 * here. Changing our own data integrity algorithm 3706 * requires that we send a P_PROTOCOL_UPDATE packet at 3707 * the same time; otherwise, the peer has no way to 3708 * tell between which packets the algorithm should 3709 * change. 
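 * In other words, the P_PROTOCOL_UPDATE packet itself marks the cut-over point on the wire: packets sent after it are expected to carry digests computed with the new algorithm.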
3710 */ 3711 3712 peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0); 3713 if (IS_ERR(peer_integrity_tfm)) { 3714 peer_integrity_tfm = NULL; 3715 drbd_err(connection, "peer data-integrity-alg %s not supported\n", 3716 integrity_alg); 3717 goto disconnect; 3718 } 3719 3720 hash_size = crypto_shash_digestsize(peer_integrity_tfm); 3721 int_dig_in = kmalloc(hash_size, GFP_KERNEL); 3722 int_dig_vv = kmalloc(hash_size, GFP_KERNEL); 3723 if (!(int_dig_in && int_dig_vv)) { 3724 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n"); 3725 goto disconnect; 3726 } 3727 } 3728 3729 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); 3730 if (!new_net_conf) 3731 goto disconnect; 3732 3733 mutex_lock(&connection->data.mutex); 3734 mutex_lock(&connection->resource->conf_update); 3735 old_net_conf = connection->net_conf; 3736 *new_net_conf = *old_net_conf; 3737 3738 new_net_conf->wire_protocol = p_proto; 3739 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p); 3740 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p); 3741 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p); 3742 new_net_conf->two_primaries = p_two_primaries; 3743 3744 rcu_assign_pointer(connection->net_conf, new_net_conf); 3745 mutex_unlock(&connection->resource->conf_update); 3746 mutex_unlock(&connection->data.mutex); 3747 3748 crypto_free_shash(connection->peer_integrity_tfm); 3749 kfree(connection->int_dig_in); 3750 kfree(connection->int_dig_vv); 3751 connection->peer_integrity_tfm = peer_integrity_tfm; 3752 connection->int_dig_in = int_dig_in; 3753 connection->int_dig_vv = int_dig_vv; 3754 3755 if (strcmp(old_net_conf->integrity_alg, integrity_alg)) 3756 drbd_info(connection, "peer data-integrity-alg: %s\n", 3757 integrity_alg[0] ? integrity_alg : "(none)"); 3758 3759 kvfree_rcu(old_net_conf); 3760 return 0; 3761 3762 disconnect_rcu_unlock: 3763 rcu_read_unlock(); 3764 disconnect: 3765 crypto_free_shash(peer_integrity_tfm); 3766 kfree(int_dig_in); 3767 kfree(int_dig_vv); 3768 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 3769 return -EIO; 3770 } 3771 3772 /* helper function 3773 * input: alg name, feature name 3774 * return: NULL (alg name was "") 3775 * ERR_PTR(error) if something goes wrong 3776 * or the crypto hash ptr, if it worked out ok. */ 3777 static struct crypto_shash *drbd_crypto_alloc_digest_safe( 3778 const struct drbd_device *device, 3779 const char *alg, const char *name) 3780 { 3781 struct crypto_shash *tfm; 3782 3783 if (!alg[0]) 3784 return NULL; 3785 3786 tfm = crypto_alloc_shash(alg, 0, 0); 3787 if (IS_ERR(tfm)) { 3788 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n", 3789 alg, name, PTR_ERR(tfm)); 3790 return tfm; 3791 } 3792 return tfm; 3793 } 3794 3795 static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi) 3796 { 3797 void *buffer = connection->data.rbuf; 3798 int size = pi->size; 3799 3800 while (size) { 3801 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE); 3802 s = drbd_recv(connection, buffer, s); 3803 if (s <= 0) { 3804 if (s < 0) 3805 return s; 3806 break; 3807 } 3808 size -= s; 3809 } 3810 if (size) 3811 return -EIO; 3812 return 0; 3813 } 3814 3815 /* 3816 * config_unknown_volume - device configuration command for unknown volume 3817 * 3818 * When a device is added to an existing connection, the node on which the 3819 * device is added first will send configuration commands to its peer but the 3820 * peer will not know about the device yet. 
It will warn and ignore these 3821 * commands. Once the device is added on the second node, the second node will 3822 * send the same device configuration commands, but in the other direction. 3823 * 3824 * (We can also end up here if drbd is misconfigured.) 3825 */ 3826 static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi) 3827 { 3828 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n", 3829 cmdname(pi->cmd), pi->vnr); 3830 return ignore_remaining_packet(connection, pi); 3831 } 3832 3833 static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi) 3834 { 3835 struct drbd_peer_device *peer_device; 3836 struct drbd_device *device; 3837 struct p_rs_param_95 *p; 3838 unsigned int header_size, data_size, exp_max_sz; 3839 struct crypto_shash *verify_tfm = NULL; 3840 struct crypto_shash *csums_tfm = NULL; 3841 struct net_conf *old_net_conf, *new_net_conf = NULL; 3842 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; 3843 const int apv = connection->agreed_pro_version; 3844 struct fifo_buffer *old_plan = NULL, *new_plan = NULL; 3845 unsigned int fifo_size = 0; 3846 int err; 3847 3848 peer_device = conn_peer_device(connection, pi->vnr); 3849 if (!peer_device) 3850 return config_unknown_volume(connection, pi); 3851 device = peer_device->device; 3852 3853 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 3854 : apv == 88 ? sizeof(struct p_rs_param) 3855 + SHARED_SECRET_MAX 3856 : apv <= 94 ? sizeof(struct p_rs_param_89) 3857 : /* apv >= 95 */ sizeof(struct p_rs_param_95); 3858 3859 if (pi->size > exp_max_sz) { 3860 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n", 3861 pi->size, exp_max_sz); 3862 return -EIO; 3863 } 3864 3865 if (apv <= 88) { 3866 header_size = sizeof(struct p_rs_param); 3867 data_size = pi->size - header_size; 3868 } else if (apv <= 94) { 3869 header_size = sizeof(struct p_rs_param_89); 3870 data_size = pi->size - header_size; 3871 D_ASSERT(device, data_size == 0); 3872 } else { 3873 header_size = sizeof(struct p_rs_param_95); 3874 data_size = pi->size - header_size; 3875 D_ASSERT(device, data_size == 0); 3876 } 3877 3878 /* initialize verify_alg and csums_alg */ 3879 p = pi->data; 3880 BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX); 3881 memset(&p->algs, 0, sizeof(p->algs)); 3882 3883 err = drbd_recv_all(peer_device->connection, p, header_size); 3884 if (err) 3885 return err; 3886 3887 mutex_lock(&connection->resource->conf_update); 3888 old_net_conf = peer_device->connection->net_conf; 3889 if (get_ldev(device)) { 3890 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 3891 if (!new_disk_conf) { 3892 put_ldev(device); 3893 mutex_unlock(&connection->resource->conf_update); 3894 drbd_err(device, "Allocation of new disk_conf failed\n"); 3895 return -ENOMEM; 3896 } 3897 3898 old_disk_conf = device->ldev->disk_conf; 3899 *new_disk_conf = *old_disk_conf; 3900 3901 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate); 3902 } 3903 3904 if (apv >= 88) { 3905 if (apv == 88) { 3906 if (data_size > SHARED_SECRET_MAX || data_size == 0) { 3907 drbd_err(device, "verify-alg of wrong size, " 3908 "peer wants %u, accepting only up to %u bytes\n", 3909 data_size, SHARED_SECRET_MAX); 3910 goto reconnect; 3911 } 3912 3913 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size); 3914 if (err) 3915 goto reconnect; 3916 /* we expect NUL terminated string */ 3917 /* but just in case someone tries to be evil */ 3918
D_ASSERT(device, p->verify_alg[data_size-1] == 0); 3919 p->verify_alg[data_size-1] = 0; 3920 3921 } else /* apv >= 89 */ { 3922 /* we still expect NUL terminated strings */ 3923 /* but just in case someone tries to be evil */ 3924 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0); 3925 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0); 3926 p->verify_alg[SHARED_SECRET_MAX-1] = 0; 3927 p->csums_alg[SHARED_SECRET_MAX-1] = 0; 3928 } 3929 3930 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) { 3931 if (device->state.conn == C_WF_REPORT_PARAMS) { 3932 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", 3933 old_net_conf->verify_alg, p->verify_alg); 3934 goto disconnect; 3935 } 3936 verify_tfm = drbd_crypto_alloc_digest_safe(device, 3937 p->verify_alg, "verify-alg"); 3938 if (IS_ERR(verify_tfm)) { 3939 verify_tfm = NULL; 3940 goto disconnect; 3941 } 3942 } 3943 3944 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) { 3945 if (device->state.conn == C_WF_REPORT_PARAMS) { 3946 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 3947 old_net_conf->csums_alg, p->csums_alg); 3948 goto disconnect; 3949 } 3950 csums_tfm = drbd_crypto_alloc_digest_safe(device, 3951 p->csums_alg, "csums-alg"); 3952 if (IS_ERR(csums_tfm)) { 3953 csums_tfm = NULL; 3954 goto disconnect; 3955 } 3956 } 3957 3958 if (apv > 94 && new_disk_conf) { 3959 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead); 3960 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target); 3961 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target); 3962 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate); 3963 3964 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; 3965 if (fifo_size != device->rs_plan_s->size) { 3966 new_plan = fifo_alloc(fifo_size); 3967 if (!new_plan) { 3968 drbd_err(device, "kmalloc of fifo_buffer failed"); 3969 put_ldev(device); 3970 goto disconnect; 3971 } 3972 } 3973 } 3974 3975 if (verify_tfm || csums_tfm) { 3976 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 3977 if (!new_net_conf) 3978 goto disconnect; 3979 3980 *new_net_conf = *old_net_conf; 3981 3982 if (verify_tfm) { 3983 strcpy(new_net_conf->verify_alg, p->verify_alg); 3984 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3985 crypto_free_shash(peer_device->connection->verify_tfm); 3986 peer_device->connection->verify_tfm = verify_tfm; 3987 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); 3988 } 3989 if (csums_tfm) { 3990 strcpy(new_net_conf->csums_alg, p->csums_alg); 3991 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3992 crypto_free_shash(peer_device->connection->csums_tfm); 3993 peer_device->connection->csums_tfm = csums_tfm; 3994 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg); 3995 } 3996 rcu_assign_pointer(connection->net_conf, new_net_conf); 3997 } 3998 } 3999 4000 if (new_disk_conf) { 4001 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 4002 put_ldev(device); 4003 } 4004 4005 if (new_plan) { 4006 old_plan = device->rs_plan_s; 4007 rcu_assign_pointer(device->rs_plan_s, new_plan); 4008 } 4009 4010 mutex_unlock(&connection->resource->conf_update); 4011 synchronize_rcu(); 4012 if (new_net_conf) 4013 kfree(old_net_conf); 4014 kfree(old_disk_conf); 4015 kfree(old_plan); 4016 4017 return 0; 4018 4019 reconnect: 4020 if (new_disk_conf) { 4021 put_ldev(device); 4022 kfree(new_disk_conf); 4023 } 4024 mutex_unlock(&connection->resource->conf_update); 4025 return -EIO; 4026 4027 disconnect: 
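/* Error path: undo what was set up above -- free the not yet published plan and disk_conf (dropping the ldev reference taken with get_ldev()), release the conf_update mutex, free any transforms that were already allocated, and force the connection to C_DISCONNECTING. */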
4028 kfree(new_plan); 4029 if (new_disk_conf) { 4030 put_ldev(device); 4031 kfree(new_disk_conf); 4032 } 4033 mutex_unlock(&connection->resource->conf_update); 4034 /* just for completeness: actually not needed, 4035 * as this is not reached if csums_tfm was ok. */ 4036 crypto_free_shash(csums_tfm); 4037 /* but free the verify_tfm again, if csums_tfm did not work out */ 4038 crypto_free_shash(verify_tfm); 4039 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4040 return -EIO; 4041 } 4042 4043 /* warn if the arguments differ by more than 12.5% */ 4044 static void warn_if_differ_considerably(struct drbd_device *device, 4045 const char *s, sector_t a, sector_t b) 4046 { 4047 sector_t d; 4048 if (a == 0 || b == 0) 4049 return; 4050 d = (a > b) ? (a - b) : (b - a); 4051 if (d > (a>>3) || d > (b>>3)) 4052 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s, 4053 (unsigned long long)a, (unsigned long long)b); 4054 } 4055 4056 static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi) 4057 { 4058 struct drbd_peer_device *peer_device; 4059 struct drbd_device *device; 4060 struct p_sizes *p = pi->data; 4061 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL; 4062 enum determine_dev_size dd = DS_UNCHANGED; 4063 sector_t p_size, p_usize, p_csize, my_usize; 4064 sector_t new_size, cur_size; 4065 int ldsc = 0; /* local disk size changed */ 4066 enum dds_flags ddsf; 4067 4068 peer_device = conn_peer_device(connection, pi->vnr); 4069 if (!peer_device) 4070 return config_unknown_volume(connection, pi); 4071 device = peer_device->device; 4072 cur_size = get_capacity(device->vdisk); 4073 4074 p_size = be64_to_cpu(p->d_size); 4075 p_usize = be64_to_cpu(p->u_size); 4076 p_csize = be64_to_cpu(p->c_size); 4077 4078 /* just store the peer's disk size for now. 4079 * we still need to figure out whether we accept that. */ 4080 device->p_size = p_size; 4081 4082 if (get_ldev(device)) { 4083 rcu_read_lock(); 4084 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; 4085 rcu_read_unlock(); 4086 4087 warn_if_differ_considerably(device, "lower level device sizes", 4088 p_size, drbd_get_max_capacity(device->ldev)); 4089 warn_if_differ_considerably(device, "user requested size", 4090 p_usize, my_usize); 4091 4092 /* if this is the first connect, or an otherwise expected 4093 * param exchange, choose the minimum */ 4094 if (device->state.conn == C_WF_REPORT_PARAMS) 4095 p_usize = min_not_zero(my_usize, p_usize); 4096 4097 /* Never shrink a device with usable data during connect, 4098 * or "attach" on the peer. 4099 * But allow online shrinking if we are connected. */ 4100 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0); 4101 if (new_size < cur_size && 4102 device->state.disk >= D_OUTDATED && 4103 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) { 4104 drbd_err(device, "The peer's disk size is too small! 
(%llu < %llu sectors)\n", 4105 (unsigned long long)new_size, (unsigned long long)cur_size); 4106 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4107 put_ldev(device); 4108 return -EIO; 4109 } 4110 4111 if (my_usize != p_usize) { 4112 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 4113 4114 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 4115 if (!new_disk_conf) { 4116 put_ldev(device); 4117 return -ENOMEM; 4118 } 4119 4120 mutex_lock(&connection->resource->conf_update); 4121 old_disk_conf = device->ldev->disk_conf; 4122 *new_disk_conf = *old_disk_conf; 4123 new_disk_conf->disk_size = p_usize; 4124 4125 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 4126 mutex_unlock(&connection->resource->conf_update); 4127 kvfree_rcu(old_disk_conf); 4128 4129 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n", 4130 (unsigned long)p_usize, (unsigned long)my_usize); 4131 } 4132 4133 put_ldev(device); 4134 } 4135 4136 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); 4137 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size(). 4138 In case we cleared the QUEUE_FLAG_DISCARD from our queue in 4139 drbd_reconsider_queue_parameters(), we can be sure that after 4140 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */ 4141 4142 ddsf = be16_to_cpu(p->dds_flags); 4143 if (get_ldev(device)) { 4144 drbd_reconsider_queue_parameters(device, device->ldev, o); 4145 dd = drbd_determine_dev_size(device, ddsf, NULL); 4146 put_ldev(device); 4147 if (dd == DS_ERROR) 4148 return -EIO; 4149 drbd_md_sync(device); 4150 } else { 4151 /* 4152 * I am diskless, need to accept the peer's *current* size. 4153 * I must NOT accept the peer's backing disk size, 4154 * it may have been larger than mine all along... 4155 * 4156 * At this point, the peer knows more about my disk, or at 4157 * least about what we last agreed upon, than myself. 4158 * So if his c_size is less than his d_size, the most likely 4159 * reason is that *my* d_size was smaller last time we checked. 4160 * 4161 * However, if he sends a zero current size, 4162 * take his (user-capped or) backing disk size anyway. 4163 * 4164 * Unless of course he does not have a disk himself. 4165 * In which case we ignore this completely. 4166 */ 4167 sector_t new_size = p_csize ?: p_usize ?: p_size; 4168 drbd_reconsider_queue_parameters(device, NULL, o); 4169 if (new_size == 0) { 4170 /* Ignore, the peer knows nothing yet. */ 4171 } else if (new_size == cur_size) { 4172 /* nothing to do */ 4173 } else if (cur_size != 0 && p_size == 0) { 4174 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n", 4175 (unsigned long long)new_size, (unsigned long long)cur_size); 4176 } else if (new_size < cur_size && device->state.role == R_PRIMARY) { 4177 drbd_err(device, "The peer's device size is too small!
(%llu < %llu sectors); demote me first!\n", 4178 (unsigned long long)new_size, (unsigned long long)cur_size); 4179 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4180 return -EIO; 4181 } else { 4182 /* I believe the peer, if 4183 * - I don't have a current size myself 4184 * - we agree on the size anyways 4185 * - I do have a current size, am Secondary, 4186 * and he has the only disk 4187 * - I do have a current size, am Primary, 4188 * and he has the only disk, 4189 * which is larger than my current size 4190 */ 4191 drbd_set_my_capacity(device, new_size); 4192 } 4193 } 4194 4195 if (get_ldev(device)) { 4196 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { 4197 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); 4198 ldsc = 1; 4199 } 4200 4201 put_ldev(device); 4202 } 4203 4204 if (device->state.conn > C_WF_REPORT_PARAMS) { 4205 if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) || 4206 ldsc) { 4207 /* we have different sizes, probably peer 4208 * needs to know my new size... */ 4209 drbd_send_sizes(peer_device, 0, ddsf); 4210 } 4211 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) || 4212 (dd == DS_GREW && device->state.conn == C_CONNECTED)) { 4213 if (device->state.pdsk >= D_INCONSISTENT && 4214 device->state.disk >= D_INCONSISTENT) { 4215 if (ddsf & DDSF_NO_RESYNC) 4216 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n"); 4217 else 4218 resync_after_online_grow(device); 4219 } else 4220 set_bit(RESYNC_AFTER_NEG, &device->flags); 4221 } 4222 } 4223 4224 return 0; 4225 } 4226 4227 static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi) 4228 { 4229 struct drbd_peer_device *peer_device; 4230 struct drbd_device *device; 4231 struct p_uuids *p = pi->data; 4232 u64 *p_uuid; 4233 int i, updated_uuids = 0; 4234 4235 peer_device = conn_peer_device(connection, pi->vnr); 4236 if (!peer_device) 4237 return config_unknown_volume(connection, pi); 4238 device = peer_device->device; 4239 4240 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO); 4241 if (!p_uuid) 4242 return false; 4243 4244 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 4245 p_uuid[i] = be64_to_cpu(p->uuid[i]); 4246 4247 kfree(device->p_uuid); 4248 device->p_uuid = p_uuid; 4249 4250 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) && 4251 device->state.disk < D_INCONSISTENT && 4252 device->state.role == R_PRIMARY && 4253 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 4254 drbd_err(device, "Can only connect to data with current UUID=%016llX\n", 4255 (unsigned long long)device->ed_uuid); 4256 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4257 return -EIO; 4258 } 4259 4260 if (get_ldev(device)) { 4261 int skip_initial_sync = 4262 device->state.conn == C_CONNECTED && 4263 peer_device->connection->agreed_pro_version >= 90 && 4264 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 4265 (p_uuid[UI_FLAGS] & 8); 4266 if (skip_initial_sync) { 4267 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n"); 4268 drbd_bitmap_io(device, &drbd_bmio_clear_n_write, 4269 "clear_n_write from receive_uuids", 4270 BM_LOCKED_TEST_ALLOWED); 4271 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]); 4272 _drbd_uuid_set(device, UI_BITMAP, 0); 4273 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 4274 CS_VERBOSE, NULL); 4275 drbd_md_sync(device); 4276 
updated_uuids = 1; 4277 } 4278 put_ldev(device); 4279 } else if (device->state.disk < D_INCONSISTENT && 4280 device->state.role == R_PRIMARY) { 4281 /* I am a diskless primary, the peer just created a new current UUID 4282 for me. */ 4283 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); 4284 } 4285 4286 /* Before we test for the disk state, we should wait until a possibly 4287 ongoing cluster-wide state change has finished. That is important if 4288 we are primary and are detaching from our disk. We need to see the 4289 new disk state... */ 4290 mutex_lock(device->state_mutex); 4291 mutex_unlock(device->state_mutex); 4292 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT) 4293 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); 4294 4295 if (updated_uuids) 4296 drbd_print_uuids(device, "receiver updated UUIDs to"); 4297 4298 return 0; 4299 } 4300 4301 /** 4302 * convert_state() - Converts the peer's view of the cluster state to our point of view 4303 * @ps: The state as seen by the peer. 4304 */ 4305 static union drbd_state convert_state(union drbd_state ps) 4306 { 4307 union drbd_state ms; 4308 4309 static enum drbd_conns c_tab[] = { 4310 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS, 4311 [C_CONNECTED] = C_CONNECTED, 4312 4313 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 4314 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 4315 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 4316 [C_VERIFY_S] = C_VERIFY_T, 4317 [C_MASK] = C_MASK, 4318 }; 4319 4320 ms.i = ps.i; 4321 4322 ms.conn = c_tab[ps.conn]; 4323 ms.peer = ps.role; 4324 ms.role = ps.peer; 4325 ms.pdsk = ps.disk; 4326 ms.disk = ps.pdsk; 4327 ms.peer_isp = (ps.aftr_isp | ps.user_isp); 4328 4329 return ms; 4330 } 4331 4332 static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi) 4333 { 4334 struct drbd_peer_device *peer_device; 4335 struct drbd_device *device; 4336 struct p_req_state *p = pi->data; 4337 union drbd_state mask, val; 4338 enum drbd_state_rv rv; 4339 4340 peer_device = conn_peer_device(connection, pi->vnr); 4341 if (!peer_device) 4342 return -EIO; 4343 device = peer_device->device; 4344 4345 mask.i = be32_to_cpu(p->mask); 4346 val.i = be32_to_cpu(p->val); 4347 4348 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) && 4349 mutex_is_locked(device->state_mutex)) { 4350 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG); 4351 return 0; 4352 } 4353 4354 mask = convert_state(mask); 4355 val = convert_state(val); 4356 4357 rv = drbd_change_state(device, CS_VERBOSE, mask, val); 4358 drbd_send_sr_reply(peer_device, rv); 4359 4360 drbd_md_sync(device); 4361 4362 return 0; 4363 } 4364 4365 static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi) 4366 { 4367 struct p_req_state *p = pi->data; 4368 union drbd_state mask, val; 4369 enum drbd_state_rv rv; 4370 4371 mask.i = be32_to_cpu(p->mask); 4372 val.i = be32_to_cpu(p->val); 4373 4374 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) && 4375 mutex_is_locked(&connection->cstate_mutex)) { 4376 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG); 4377 return 0; 4378 } 4379 4380 mask = convert_state(mask); 4381 val = convert_state(val); 4382 4383 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL); 4384 conn_send_sr_reply(connection, rv); 4385 4386 return 0; 4387 } 4388 4389 static int receive_state(struct drbd_connection *connection, struct packet_info *pi) 4390 { 4391 struct drbd_peer_device *peer_device; 4392
struct drbd_device *device; 4393 struct p_state *p = pi->data; 4394 union drbd_state os, ns, peer_state; 4395 enum drbd_disk_state real_peer_disk; 4396 enum chg_state_flags cs_flags; 4397 int rv; 4398 4399 peer_device = conn_peer_device(connection, pi->vnr); 4400 if (!peer_device) 4401 return config_unknown_volume(connection, pi); 4402 device = peer_device->device; 4403 4404 peer_state.i = be32_to_cpu(p->state); 4405 4406 real_peer_disk = peer_state.disk; 4407 if (peer_state.disk == D_NEGOTIATING) { 4408 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; 4409 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 4410 } 4411 4412 spin_lock_irq(&device->resource->req_lock); 4413 retry: 4414 os = ns = drbd_read_state(device); 4415 spin_unlock_irq(&device->resource->req_lock); 4416 4417 /* If some other part of the code (ack_receiver thread, timeout) 4418 * already decided to close the connection again, 4419 * we must not "re-establish" it here. */ 4420 if (os.conn <= C_TEAR_DOWN) 4421 return -ECONNRESET; 4422 4423 /* If this is the "end of sync" confirmation, usually the peer disk 4424 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits 4425 * set) resync started in PausedSyncT, or if the timing of pause-/ 4426 * unpause-sync events has been "just right", the peer disk may 4427 * transition from D_CONSISTENT to D_UP_TO_DATE as well. 4428 */ 4429 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) && 4430 real_peer_disk == D_UP_TO_DATE && 4431 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { 4432 /* If we are (becoming) SyncSource, but peer is still in sync 4433 * preparation, ignore its uptodate-ness to avoid flapping, it 4434 * will change to inconsistent once the peer reaches active 4435 * syncing states. 4436 * It may have changed syncer-paused flags, however, so we 4437 * cannot ignore this completely. */ 4438 if (peer_state.conn > C_CONNECTED && 4439 peer_state.conn < C_SYNC_SOURCE) 4440 real_peer_disk = D_INCONSISTENT; 4441 4442 /* if peer_state changes to connected at the same time, 4443 * it explicitly notifies us that it finished resync. 4444 * Maybe we should finish it up, too? */ 4445 else if (os.conn >= C_SYNC_SOURCE && 4446 peer_state.conn == C_CONNECTED) { 4447 if (drbd_bm_total_weight(device) <= device->rs_failed) 4448 drbd_resync_finished(device); 4449 return 0; 4450 } 4451 } 4452 4453 /* explicit verify finished notification, stop sector reached. */ 4454 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE && 4455 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) { 4456 ov_out_of_sync_print(device); 4457 drbd_resync_finished(device); 4458 return 0; 4459 } 4460 4461 /* peer says his disk is inconsistent, while we think it is uptodate, 4462 * and this happens while the peer still thinks we have a sync going on, 4463 * but we think we are already done with the sync. 4464 * We ignore this to avoid flapping pdsk. 4465 * This should not happen, if the peer is a recent version of drbd. 
*/ 4466 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && 4467 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) 4468 real_peer_disk = D_UP_TO_DATE; 4469 4470 if (ns.conn == C_WF_REPORT_PARAMS) 4471 ns.conn = C_CONNECTED; 4472 4473 if (peer_state.conn == C_AHEAD) 4474 ns.conn = C_BEHIND; 4475 4476 /* TODO: 4477 * if (primary and diskless and peer uuid != effective uuid) 4478 * abort attach on peer; 4479 * 4480 * If this node does not have good data, was already connected, but 4481 * the peer did a late attach only now, trying to "negotiate" with me, 4482 * AND I am currently Primary, possibly frozen, with some specific 4483 * "effective" uuid, this should never be reached, really, because 4484 * we first send the uuids, then the current state. 4485 * 4486 * In this scenario, we already dropped the connection hard 4487 * when we received the unsuitable uuids (in receive_uuids()). 4488 * 4489 * Should we want to change this, that is: not drop the connection in 4490 * receive_uuids() already, then we would need to add a branch here 4491 * that aborts the attach of "unsuitable uuids" on the peer in case 4492 * this node is currently Diskless Primary. 4493 */ 4494 4495 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING && 4496 get_ldev_if_state(device, D_NEGOTIATING)) { 4497 int cr; /* consider resync */ 4498 4499 /* if we established a new connection */ 4500 cr = (os.conn < C_CONNECTED); 4501 /* if we had an established connection 4502 * and one of the nodes newly attaches a disk */ 4503 cr |= (os.conn == C_CONNECTED && 4504 (peer_state.disk == D_NEGOTIATING || 4505 os.disk == D_NEGOTIATING)); 4506 /* if we have both been inconsistent, and the peer has been 4507 * forced to be UpToDate with --force */ 4508 cr |= test_bit(CONSIDER_RESYNC, &device->flags); 4509 /* if we had been plain connected, and the admin requested to 4510 * start a sync by "invalidate" or "invalidate-remote" */ 4511 cr |= (os.conn == C_CONNECTED && 4512 (peer_state.conn >= C_STARTING_SYNC_S && 4513 peer_state.conn <= C_WF_BITMAP_T)); 4514 4515 if (cr) 4516 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk); 4517 4518 put_ldev(device); 4519 if (ns.conn == C_MASK) { 4520 ns.conn = C_CONNECTED; 4521 if (device->state.disk == D_NEGOTIATING) { 4522 drbd_force_state(device, NS(disk, D_FAILED)); 4523 } else if (peer_state.disk == D_NEGOTIATING) { 4524 drbd_err(device, "Disk attach process on the peer node was aborted.\n"); 4525 peer_state.disk = D_DISKLESS; 4526 real_peer_disk = D_DISKLESS; 4527 } else { 4528 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags)) 4529 return -EIO; 4530 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS); 4531 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4532 return -EIO; 4533 } 4534 } 4535 } 4536 4537 spin_lock_irq(&device->resource->req_lock); 4538 if (os.i != drbd_read_state(device).i) 4539 goto retry; 4540 clear_bit(CONSIDER_RESYNC, &device->flags); 4541 ns.peer = peer_state.role; 4542 ns.pdsk = real_peer_disk; 4543 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 4544 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) 4545 ns.disk = device->new_state_tmp.disk; 4546 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ?
0 : CS_HARD); 4547 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && 4548 test_bit(NEW_CUR_UUID, &device->flags)) { 4549 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this 4550 for temporary network outages! */ 4551 spin_unlock_irq(&device->resource->req_lock); 4552 drbd_err(device, "Aborting Connect, cannot thaw IO with a peer that is only Consistent\n"); 4553 tl_clear(peer_device->connection); 4554 drbd_uuid_new_current(device); 4555 clear_bit(NEW_CUR_UUID, &device->flags); 4556 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); 4557 return -EIO; 4558 } 4559 rv = _drbd_set_state(device, ns, cs_flags, NULL); 4560 ns = drbd_read_state(device); 4561 spin_unlock_irq(&device->resource->req_lock); 4562 4563 if (rv < SS_SUCCESS) { 4564 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4565 return -EIO; 4566 } 4567 4568 if (os.conn > C_WF_REPORT_PARAMS) { 4569 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && 4570 peer_state.disk != D_NEGOTIATING) { 4571 /* we want resync, peer has not yet decided to sync... */ 4572 /* Nowadays only used when forcing a node into primary role and 4573 setting its disk to UpToDate with that */ 4574 drbd_send_uuids(peer_device); 4575 drbd_send_current_state(peer_device); 4576 } 4577 } 4578 4579 clear_bit(DISCARD_MY_DATA, &device->flags); 4580 4581 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */ 4582 4583 return 0; 4584 } 4585 4586 static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi) 4587 { 4588 struct drbd_peer_device *peer_device; 4589 struct drbd_device *device; 4590 struct p_rs_uuid *p = pi->data; 4591 4592 peer_device = conn_peer_device(connection, pi->vnr); 4593 if (!peer_device) 4594 return -EIO; 4595 device = peer_device->device; 4596 4597 wait_event(device->misc_wait, 4598 device->state.conn == C_WF_SYNC_UUID || 4599 device->state.conn == C_BEHIND || 4600 device->state.conn < C_CONNECTED || 4601 device->state.disk < D_NEGOTIATING); 4602 4603 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */ 4604 4605 /* Here the _drbd_uuid_ functions are right, current should 4606 _not_ be rotated into the history */ 4607 if (get_ldev_if_state(device, D_NEGOTIATING)) { 4608 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid)); 4609 _drbd_uuid_set(device, UI_BITMAP, 0UL); 4610 4611 drbd_print_uuids(device, "updated sync uuid"); 4612 drbd_start_resync(device, C_SYNC_TARGET); 4613 4614 put_ldev(device); 4615 } else 4616 drbd_err(device, "Ignoring SyncUUID packet!\n"); 4617 4618 return 0; 4619 } 4620 4621 /* 4622 * receive_bitmap_plain 4623 * 4624 * Return 0 when done, 1 when another iteration is needed, and a negative error 4625 * code upon failure.
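 * Worked example (illustrative numbers only): with a 4096 byte socket buffer and a 16 byte header, data_size is 4080 bytes, i.e. at most 510 64-bit words of bitmap per packet; "want" is the byte count derived from that, and it must match the payload size the peer announced in the packet header.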
4626 */ 4627 static int 4628 receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size, 4629 unsigned long *p, struct bm_xfer_ctx *c) 4630 { 4631 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - 4632 drbd_header_size(peer_device->connection); 4633 unsigned int num_words = min_t(size_t, data_size / sizeof(*p), 4634 c->bm_words - c->word_offset); 4635 unsigned int want = num_words * sizeof(*p); 4636 int err; 4637 4638 if (want != size) { 4639 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size); 4640 return -EIO; 4641 } 4642 if (want == 0) 4643 return 0; 4644 err = drbd_recv_all(peer_device->connection, p, want); 4645 if (err) 4646 return err; 4647 4648 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p); 4649 4650 c->word_offset += num_words; 4651 c->bit_offset = c->word_offset * BITS_PER_LONG; 4652 if (c->bit_offset > c->bm_bits) 4653 c->bit_offset = c->bm_bits; 4654 4655 return 1; 4656 } 4657 4658 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p) 4659 { 4660 return (enum drbd_bitmap_code)(p->encoding & 0x0f); 4661 } 4662 4663 static int dcbp_get_start(struct p_compressed_bm *p) 4664 { 4665 return (p->encoding & 0x80) != 0; 4666 } 4667 4668 static int dcbp_get_pad_bits(struct p_compressed_bm *p) 4669 { 4670 return (p->encoding >> 4) & 0x7; 4671 } 4672 4673 /* 4674 * recv_bm_rle_bits 4675 * 4676 * Return 0 when done, 1 when another iteration is needed, and a negative error 4677 * code upon failure. 4678 */ 4679 static int 4680 recv_bm_rle_bits(struct drbd_peer_device *peer_device, 4681 struct p_compressed_bm *p, 4682 struct bm_xfer_ctx *c, 4683 unsigned int len) 4684 { 4685 struct bitstream bs; 4686 u64 look_ahead; 4687 u64 rl; 4688 u64 tmp; 4689 unsigned long s = c->bit_offset; 4690 unsigned long e; 4691 int toggle = dcbp_get_start(p); 4692 int have; 4693 int bits; 4694 4695 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p)); 4696 4697 bits = bitstream_get_bits(&bs, &look_ahead, 64); 4698 if (bits < 0) 4699 return -EIO; 4700 4701 for (have = bits; have > 0; s += rl, toggle = !toggle) { 4702 bits = vli_decode_bits(&rl, look_ahead); 4703 if (bits <= 0) 4704 return -EIO; 4705 4706 if (toggle) { 4707 e = s + rl -1; 4708 if (e >= c->bm_bits) { 4709 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 4710 return -EIO; 4711 } 4712 _drbd_bm_set_bits(peer_device->device, s, e); 4713 } 4714 4715 if (have < bits) { 4716 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 4717 have, bits, look_ahead, 4718 (unsigned int)(bs.cur.b - p->code), 4719 (unsigned int)bs.buf_len); 4720 return -EIO; 4721 } 4722 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */ 4723 if (likely(bits < 64)) 4724 look_ahead >>= bits; 4725 else 4726 look_ahead = 0; 4727 have -= bits; 4728 4729 bits = bitstream_get_bits(&bs, &tmp, 64 - have); 4730 if (bits < 0) 4731 return -EIO; 4732 look_ahead |= tmp << have; 4733 have += bits; 4734 } 4735 4736 c->bit_offset = s; 4737 bm_xfer_ctx_bit_to_word_offset(c); 4738 4739 return (s != c->bm_bits); 4740 } 4741 4742 /* 4743 * decode_bitmap_c 4744 * 4745 * Return 0 when done, 1 when another iteration is needed, and a negative error 4746 * code upon failure. 
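 * The RLE_VLI_Bits payload is a sequence of variable-length-integer coded run lengths of alternating clear/set runs. Illustrative example: with the start-toggle bit set and decoded run lengths 7, 3 and 1, recv_bm_rle_bits() sets bits 0..6, leaves bits 7..9 alone, and sets bit 10.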
4747 */ 4748 static int 4749 decode_bitmap_c(struct drbd_peer_device *peer_device, 4750 struct p_compressed_bm *p, 4751 struct bm_xfer_ctx *c, 4752 unsigned int len) 4753 { 4754 if (dcbp_get_code(p) == RLE_VLI_Bits) 4755 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p)); 4756 4757 /* other variants had been implemented for evaluation, 4758 * but have been dropped as this one turned out to be "best" 4759 * during all our tests. */ 4760 4761 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 4762 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4763 return -EIO; 4764 } 4765 4766 void INFO_bm_xfer_stats(struct drbd_device *device, 4767 const char *direction, struct bm_xfer_ctx *c) 4768 { 4769 /* what would it take to transfer it "plaintext" */ 4770 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection); 4771 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; 4772 unsigned int plain = 4773 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + 4774 c->bm_words * sizeof(unsigned long); 4775 unsigned int total = c->bytes[0] + c->bytes[1]; 4776 unsigned int r; 4777 4778 /* total cannot be zero. But just in case: */ 4779 if (total == 0) 4780 return; 4781 4782 /* don't report if not compressed */ 4783 if (total >= plain) 4784 return; 4785 4786 /* total < plain. check for overflow, still */ 4787 r = (total > UINT_MAX/1000) ? (total / (plain/1000)) 4788 : (1000 * total / plain); 4789 4790 if (r > 1000) 4791 r = 1000; 4792 4793 r = 1000 - r; 4794 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " 4795 "total %u; compression: %u.%u%%\n", 4796 direction, 4797 c->bytes[1], c->packets[1], 4798 c->bytes[0], c->packets[0], 4799 total, r/10, r % 10); 4800 } 4801 4802 /* Since we are processing the bitfield from lower addresses to higher, 4803 it does not matter whether we process it in 32 bit chunks or 64 bit 4804 chunks as long as it is little endian. (Understand it as byte stream, 4805 beginning with the lowest byte...) If we used big endian 4806 we would need to process it from the highest address to the lowest, 4807 in order to be agnostic to the 32 vs 64 bits issue. 4808 4809 Returns 0 on success, or a negative error code on failure. */ 4810 static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi) 4811 { 4812 struct drbd_peer_device *peer_device; 4813 struct drbd_device *device; 4814 struct bm_xfer_ctx c; 4815 int err; 4816 4817 peer_device = conn_peer_device(connection, pi->vnr); 4818 if (!peer_device) 4819 return -EIO; 4820 device = peer_device->device; 4821 4822 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED); 4823 /* you are supposed to send additional out-of-sync information 4824 * if you actually set bits during this phase */ 4825 4826 c = (struct bm_xfer_ctx) { 4827 .bm_bits = drbd_bm_bits(device), 4828 .bm_words = drbd_bm_words(device), 4829 }; 4830 4831 for (;;) { 4832 if (pi->cmd == P_BITMAP) 4833 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c); 4834 else if (pi->cmd == P_COMPRESSED_BITMAP) { 4835 /* MAYBE: sanity check that we speak proto >= 90, 4836 * and the feature is enabled!
*/ 4837 struct p_compressed_bm *p = pi->data; 4838 4839 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) { 4840 drbd_err(device, "ReportCBitmap packet too large\n"); 4841 err = -EIO; 4842 goto out; 4843 } 4844 if (pi->size <= sizeof(*p)) { 4845 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size); 4846 err = -EIO; 4847 goto out; 4848 } 4849 err = drbd_recv_all(peer_device->connection, p, pi->size); 4850 if (err) 4851 goto out; 4852 err = decode_bitmap_c(peer_device, p, &c, pi->size); 4853 } else { 4854 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd); 4855 err = -EIO; 4856 goto out; 4857 } 4858 4859 c.packets[pi->cmd == P_BITMAP]++; 4860 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size; 4861 4862 if (err <= 0) { 4863 if (err < 0) 4864 goto out; 4865 break; 4866 } 4867 err = drbd_recv_header(peer_device->connection, pi); 4868 if (err) 4869 goto out; 4870 } 4871 4872 INFO_bm_xfer_stats(device, "receive", &c); 4873 4874 if (device->state.conn == C_WF_BITMAP_T) { 4875 enum drbd_state_rv rv; 4876 4877 err = drbd_send_bitmap(device); 4878 if (err) 4879 goto out; 4880 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ 4881 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 4882 D_ASSERT(device, rv == SS_SUCCESS); 4883 } else if (device->state.conn != C_WF_BITMAP_S) { 4884 /* admin may have requested C_DISCONNECTING, 4885 * other threads may have noticed network errors */ 4886 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n", 4887 drbd_conn_str(device->state.conn)); 4888 } 4889 err = 0; 4890 4891 out: 4892 drbd_bm_unlock(device); 4893 if (!err && device->state.conn == C_WF_BITMAP_S) 4894 drbd_start_resync(device, C_SYNC_SOURCE); 4895 return err; 4896 } 4897 4898 static int receive_skip(struct drbd_connection *connection, struct packet_info *pi) 4899 { 4900 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n", 4901 pi->cmd, pi->size); 4902 4903 return ignore_remaining_packet(connection, pi); 4904 } 4905 4906 static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi) 4907 { 4908 /* Make sure we've acked all the TCP data associated 4909 * with the data requests being unplugged */ 4910 tcp_sock_set_quickack(connection->data.socket->sk, 2); 4911 return 0; 4912 } 4913 4914 static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi) 4915 { 4916 struct drbd_peer_device *peer_device; 4917 struct drbd_device *device; 4918 struct p_block_desc *p = pi->data; 4919 4920 peer_device = conn_peer_device(connection, pi->vnr); 4921 if (!peer_device) 4922 return -EIO; 4923 device = peer_device->device; 4924 4925 switch (device->state.conn) { 4926 case C_WF_SYNC_UUID: 4927 case C_WF_BITMAP_T: 4928 case C_BEHIND: 4929 break; 4930 default: 4931 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 4932 drbd_conn_str(device->state.conn)); 4933 } 4934 4935 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 4936 4937 return 0; 4938 } 4939 4940 static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi) 4941 { 4942 struct drbd_peer_device *peer_device; 4943 struct p_block_desc *p = pi->data; 4944 struct drbd_device *device; 4945 sector_t sector; 4946 int size, err = 0; 4947 4948 peer_device = conn_peer_device(connection, pi->vnr); 4949 if (!peer_device) 4950 return -EIO; 4951 device = 
peer_device->device; 4952 4953 sector = be64_to_cpu(p->sector); 4954 size = be32_to_cpu(p->blksize); 4955 4956 dec_rs_pending(device); 4957 4958 if (get_ldev(device)) { 4959 struct drbd_peer_request *peer_req; 4960 4961 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, 4962 size, 0, GFP_NOIO); 4963 if (!peer_req) { 4964 put_ldev(device); 4965 return -ENOMEM; 4966 } 4967 4968 peer_req->w.cb = e_end_resync_block; 4969 peer_req->opf = REQ_OP_DISCARD; 4970 peer_req->submit_jif = jiffies; 4971 peer_req->flags |= EE_TRIM; 4972 4973 spin_lock_irq(&device->resource->req_lock); 4974 list_add_tail(&peer_req->w.list, &device->sync_ee); 4975 spin_unlock_irq(&device->resource->req_lock); 4976 4977 atomic_add(pi->size >> 9, &device->rs_sect_ev); 4978 err = drbd_submit_peer_request(peer_req); 4979 4980 if (err) { 4981 spin_lock_irq(&device->resource->req_lock); 4982 list_del(&peer_req->w.list); 4983 spin_unlock_irq(&device->resource->req_lock); 4984 4985 drbd_free_peer_req(device, peer_req); 4986 put_ldev(device); 4987 err = 0; 4988 goto fail; 4989 } 4990 4991 inc_unacked(device); 4992 4993 /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(), 4994 as well as drbd_rs_complete_io() */ 4995 } else { 4996 fail: 4997 drbd_rs_complete_io(device, sector); 4998 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); 4999 } 5000 5001 atomic_add(size >> 9, &device->rs_sect_in); 5002 5003 return err; 5004 } 5005 5006 struct data_cmd { 5007 int expect_payload; 5008 unsigned int pkt_size; 5009 int (*fn)(struct drbd_connection *, struct packet_info *); 5010 }; 5011 5012 static struct data_cmd drbd_cmd_handler[] = { 5013 [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, 5014 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, 5015 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , 5016 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , 5017 [P_BITMAP] = { 1, 0, receive_bitmap } , 5018 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } , 5019 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote }, 5020 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5021 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5022 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam }, 5023 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam }, 5024 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, 5025 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, 5026 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, 5027 [P_STATE] = { 0, sizeof(struct p_state), receive_state }, 5028 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, 5029 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, 5030 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5031 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 5032 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 5033 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5034 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, 5035 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, 5036 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, 5037 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, 5038 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, 5039 [P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data }, 5040 [P_RS_DEALLOCATED] = 
{ 0, sizeof(struct p_block_desc), receive_rs_deallocated }, 5041 }; 5042 5043 static void drbdd(struct drbd_connection *connection) 5044 { 5045 struct packet_info pi; 5046 size_t shs; /* sub header size */ 5047 int err; 5048 5049 while (get_t_state(&connection->receiver) == RUNNING) { 5050 struct data_cmd const *cmd; 5051 5052 drbd_thread_current_set_cpu(&connection->receiver); 5053 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug); 5054 if (drbd_recv_header_maybe_unplug(connection, &pi)) 5055 goto err_out; 5056 5057 cmd = &drbd_cmd_handler[pi.cmd]; 5058 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) { 5059 drbd_err(connection, "Unexpected data packet %s (0x%04x)", 5060 cmdname(pi.cmd), pi.cmd); 5061 goto err_out; 5062 } 5063 5064 shs = cmd->pkt_size; 5065 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME) 5066 shs += sizeof(struct o_qlim); 5067 if (pi.size > shs && !cmd->expect_payload) { 5068 drbd_err(connection, "No payload expected %s l:%d\n", 5069 cmdname(pi.cmd), pi.size); 5070 goto err_out; 5071 } 5072 if (pi.size < shs) { 5073 drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n", 5074 cmdname(pi.cmd), (int)shs, pi.size); 5075 goto err_out; 5076 } 5077 5078 if (shs) { 5079 update_receiver_timing_details(connection, drbd_recv_all_warn); 5080 err = drbd_recv_all_warn(connection, pi.data, shs); 5081 if (err) 5082 goto err_out; 5083 pi.size -= shs; 5084 } 5085 5086 update_receiver_timing_details(connection, cmd->fn); 5087 err = cmd->fn(connection, &pi); 5088 if (err) { 5089 drbd_err(connection, "error receiving %s, e: %d l: %d!\n", 5090 cmdname(pi.cmd), err, pi.size); 5091 goto err_out; 5092 } 5093 } 5094 return; 5095 5096 err_out: 5097 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 5098 } 5099 5100 static void conn_disconnect(struct drbd_connection *connection) 5101 { 5102 struct drbd_peer_device *peer_device; 5103 enum drbd_conns oc; 5104 int vnr; 5105 5106 if (connection->cstate == C_STANDALONE) 5107 return; 5108 5109 /* We are about to start the cleanup after connection loss. 5110 * Make sure drbd_make_request knows about that. 5111 * Usually we should be in some network failure state already, 5112 * but just in case we are not, we fix it up here. 5113 */ 5114 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); 5115 5116 /* ack_receiver does not clean up anything. 
It must not interfere, either */ 5117 drbd_thread_stop(&connection->ack_receiver); 5118 if (connection->ack_sender) { 5119 destroy_workqueue(connection->ack_sender); 5120 connection->ack_sender = NULL; 5121 } 5122 drbd_free_sock(connection); 5123 5124 rcu_read_lock(); 5125 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 5126 struct drbd_device *device = peer_device->device; 5127 kref_get(&device->kref); 5128 rcu_read_unlock(); 5129 drbd_disconnected(peer_device); 5130 kref_put(&device->kref, drbd_destroy_device); 5131 rcu_read_lock(); 5132 } 5133 rcu_read_unlock(); 5134 5135 if (!list_empty(&connection->current_epoch->list)) 5136 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n"); 5137 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 5138 atomic_set(&connection->current_epoch->epoch_size, 0); 5139 connection->send.seen_any_write_yet = false; 5140 5141 drbd_info(connection, "Connection closed\n"); 5142 5143 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN) 5144 conn_try_outdate_peer_async(connection); 5145 5146 spin_lock_irq(&connection->resource->req_lock); 5147 oc = connection->cstate; 5148 if (oc >= C_UNCONNECTED) 5149 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); 5150 5151 spin_unlock_irq(&connection->resource->req_lock); 5152 5153 if (oc == C_DISCONNECTING) 5154 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); 5155 } 5156 5157 static int drbd_disconnected(struct drbd_peer_device *peer_device) 5158 { 5159 struct drbd_device *device = peer_device->device; 5160 unsigned int i; 5161 5162 /* wait for current activity to cease. */ 5163 spin_lock_irq(&device->resource->req_lock); 5164 _drbd_wait_ee_list_empty(device, &device->active_ee); 5165 _drbd_wait_ee_list_empty(device, &device->sync_ee); 5166 _drbd_wait_ee_list_empty(device, &device->read_ee); 5167 spin_unlock_irq(&device->resource->req_lock); 5168 5169 /* We do not have data structures that would allow us to 5170 * get the rs_pending_cnt down to 0 again. 5171 * * On C_SYNC_TARGET we do not have any data structures describing 5172 * the pending RSDataRequest's we have sent. 5173 * * On C_SYNC_SOURCE there is no data structure that tracks 5174 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 5175 * And no, it is not the sum of the reference counts in the 5176 * resync_LRU. The resync_LRU tracks the whole operation including 5177 * the disk-IO, while the rs_pending_cnt only tracks the blocks 5178 * on the fly. */ 5179 drbd_rs_cancel_all(device); 5180 device->rs_total = 0; 5181 device->rs_failed = 0; 5182 atomic_set(&device->rs_pending_cnt, 0); 5183 wake_up(&device->misc_wait); 5184 5185 del_timer_sync(&device->resync_timer); 5186 resync_timer_fn(&device->resync_timer); 5187 5188 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 5189 * w_make_resync_request etc. which may still be on the worker queue 5190 * to be "canceled" */ 5191 drbd_flush_workqueue(&peer_device->connection->sender_work); 5192 5193 drbd_finish_peer_reqs(device); 5194 5195 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs() 5196 might have queued work again. The one before drbd_finish_peer_reqs() is 5197 necessary to reclaim net_ee in drbd_finish_peer_reqs().
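The resulting order is: first flush, then drbd_finish_peer_reqs() (which reclaims net_ee and may queue new work), then the second flush to drain whatever was queued in between.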
*/ 5198 drbd_flush_workqueue(&peer_device->connection->sender_work); 5199 5200 /* need to do it again, drbd_finish_peer_reqs() may have populated it 5201 * again via drbd_try_clear_on_disk_bm(). */ 5202 drbd_rs_cancel_all(device); 5203 5204 kfree(device->p_uuid); 5205 device->p_uuid = NULL; 5206 5207 if (!drbd_suspended(device)) 5208 tl_clear(peer_device->connection); 5209 5210 drbd_md_sync(device); 5211 5212 if (get_ldev(device)) { 5213 drbd_bitmap_io(device, &drbd_bm_write_copy_pages, 5214 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED); 5215 put_ldev(device); 5216 } 5217 5218 /* tcp_close and release of sendpage pages can be deferred. I don't 5219 * want to use SO_LINGER, because apparently it can be deferred for 5220 * more than 20 seconds (longest time I checked). 5221 * 5222 * Actually we don't care for exactly when the network stack does its 5223 * put_page(), but release our reference on these pages right here. 5224 */ 5225 i = drbd_free_peer_reqs(device, &device->net_ee); 5226 if (i) 5227 drbd_info(device, "net_ee not empty, killed %u entries\n", i); 5228 i = atomic_read(&device->pp_in_use_by_net); 5229 if (i) 5230 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); 5231 i = atomic_read(&device->pp_in_use); 5232 if (i) 5233 drbd_info(device, "pp_in_use = %d, expected 0\n", i); 5234 5235 D_ASSERT(device, list_empty(&device->read_ee)); 5236 D_ASSERT(device, list_empty(&device->active_ee)); 5237 D_ASSERT(device, list_empty(&device->sync_ee)); 5238 D_ASSERT(device, list_empty(&device->done_ee)); 5239 5240 return 0; 5241 } 5242 5243 /* 5244 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 5245 * we can agree on is stored in agreed_pro_version. 5246 * 5247 * feature flags and the reserved array should be enough room for future 5248 * enhancements of the handshake protocol, and possible plugins... 5249 * 5250 * for now, they are expected to be zero, but ignored. 5251 */ 5252 static int drbd_send_features(struct drbd_connection *connection) 5253 { 5254 struct drbd_socket *sock; 5255 struct p_connection_features *p; 5256 5257 sock = &connection->data; 5258 p = conn_prepare_command(connection, sock); 5259 if (!p) 5260 return -EIO; 5261 memset(p, 0, sizeof(*p)); 5262 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 5263 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 5264 p->feature_flags = cpu_to_be32(PRO_FEATURES); 5265 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); 5266 } 5267 5268 /* 5269 * return values: 5270 * 1 yes, we have a valid connection 5271 * 0 oops, did not work out, please try again 5272 * -1 peer talks different language, 5273 * no point in trying again, please go standalone. 5274 */ 5275 static int drbd_do_features(struct drbd_connection *connection) 5276 { 5277 /* ASSERT current == connection->receiver ... 
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_connection *connection)
{
	/* ASSERT current == connection->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(connection);
	if (err)
		return 0;

	err = drbd_recv_header(connection, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(connection, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
	connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);

	drbd_info(connection, "Handshake successful: "
		  "Agreed network protocol version %d\n", connection->agreed_pro_version);

	drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n",
		  connection->agreed_features,
		  connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
		  connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
		  connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "",
		  connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" :
		  connection->agreed_features ? "" : " none");

	return 1;

 incompat:
	drbd_err(connection, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_connection *connection)
{
	drbd_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN	64

/* Return value:
 *	 1 - auth succeeded,
 *	 0 - failed, try again (network error),
 *	-1 - auth failed, don't try again.
 */
static int drbd_do_auth(struct drbd_connection *connection)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct shash_desc *desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer. */
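	/*
	 * Overview of the exchange implemented below; both peers run it
	 * symmetrically:
	 *
	 *   1. send P_AUTH_CHALLENGE carrying CHALLENGE_LEN random bytes
	 *   2. receive the peer's P_AUTH_CHALLENGE
	 *   3. reject it if it equals our own challenge (reflection attack)
	 *   4. send P_AUTH_RESPONSE = HMAC(shared secret, peer's challenge)
	 *   5. receive the peer's P_AUTH_RESPONSE and compare it against
	 *      HMAC(shared secret, our challenge)
	 *
	 * The shared secret itself never crosses the wire.
	 */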
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc = kmalloc(sizeof(struct shash_desc) +
		       crypto_shash_descsize(connection->cram_hmac_tfm),
		       GFP_KERNEL);
	if (!desc) {
		rv = -1;
		goto fail;
	}
	desc->tfm = connection->cram_hmac_tfm;

	rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &connection->data;
	if (!conn_prepare_command(connection, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(connection, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_CHALLENGE) {
		drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = -1;
		goto fail;
	}

	if (pi.size > CHALLENGE_LEN * 2) {
		drbd_err(connection, "AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	if (pi.size < CHALLENGE_LEN) {
		drbd_err(connection, "AuthChallenge payload too small.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (!peers_ch) {
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(connection, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
		drbd_err(connection, "Peer presented the same challenge!\n");
		rv = -1;
		goto fail;
	}

	resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (!response) {
		rv = -1;
		goto fail;
	}

	rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
	if (rv) {
		drbd_err(connection, "crypto_shash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(connection, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(connection, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_RESPONSE) {
		drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size != resp_size) {
		drbd_err(connection, "expected AuthResponse payload of %u bytes, received %u\n",
			 resp_size, pi.size);
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(connection, response, resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (!right_response) {
		rv = -1;
		goto fail;
	}

	rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
				 right_response);
	if (rv) {
		drbd_err(connection, "crypto_shash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;
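	/*
	 * Side note: memcmp() is not constant-time.  Where timing side
	 * channels are a concern, the kernel's crypto_memneq() is the usual
	 * primitive for comparing MACs, e.g.
	 *	rv = !crypto_memneq(response, right_response, resp_size);
	 */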
 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);
	if (desc) {
		shash_desc_zero(desc);
		kfree(desc);
	}

	return rv;
}
#endif

int drbd_receiver(struct drbd_thread *thi)
{
	struct drbd_connection *connection = thi->connection;
	int h;

	drbd_info(connection, "receiver (re)started\n");

	do {
		h = conn_connect(connection);
		if (h == 0) {
			conn_disconnect(connection);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			drbd_warn(connection, "Discarding network configuration.\n");
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0) {
		blk_start_plug(&connection->receiver_plug);
		drbdd(connection);
		blk_finish_plug(&connection->receiver_plug);
	}

	conn_disconnect(connection);

	drbd_info(connection, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
		drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&connection->ping_wait);

	return 0;
}

static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
		D_ASSERT(device, connection->agreed_pro_version < 100);
		return got_conn_RqSReply(connection, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &device->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &device->flags);
		drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&device->state_wait);

	return 0;
}

static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{
	return drbd_send_ping_ack(connection);
}

static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{
	/* restore idle timeout */
	connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int * HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
		wake_up(&connection->ping_wait);

	return 0;
}
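/*
 * Keepalive mechanics, for reference: drbd_ack_receiver() arms SEND_PING
 * when the idle receive timeout (ping-int, seconds) expires.  Sending
 * P_PING switches the meta socket to the much shorter ping-timeout
 * (nc->ping_timeo is in tenths of a second, hence the "t /= 10" in
 * set_rcvtimeo() below).  The P_PING_ACK handled above restores the idle
 * timeout; a missing PingAck within ping-timeout tears the connection down.
 */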
static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (get_ldev(device)) {
		drbd_rs_complete_io(device, sector);
		drbd_set_in_sync(device, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(device);
	}
	dec_rs_pending(device);
	atomic_add(blksize >> 9, &device->rs_sect_in);

	return 0;
}

static int
validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&device->resource->req_lock);
	req = find_request(device, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&device->resource->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&device->resource->req_lock);

	if (m.bio)
		complete_master_bio(device, &m);
	return 0;
}

static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(device, sector, blksize);
		dec_rs_pending(device);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_SUPERSEDED:
		what = CONFLICT_RESOLVED;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(device, p->block_id, sector,
					     &device->write_requests, __func__,
					     what, false);
}

static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(device);
		drbd_rs_failed_io(device, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(device, p->block_id, sector,
					    &device->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		 * The master bio might already be completed, therefore the
		 * request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		 * but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(device, sector, size);
	}
	return 0;
}
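/*
 * For orientation, how these acks map onto the replication protocols:
 * with protocol B a write is considered stable once the peer has it in
 * RAM (P_RECV_ACK); with protocol C once it is on the peer's disk
 * (P_WRITE_ACK).  P_RS_WRITE_ACK is the resync variant and additionally
 * marks the block in sync (WRITE_ACKED_BY_PEER_AND_SIS).  Protocol A
 * awaits no positive per-write ack at all, but may still see P_NEG_ACK
 * on peer-side errors, as the comment in got_NegAck() above notes.
 */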
static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
		 (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(device, p->block_id, sector,
					     &device->read_requests, __func__,
					     NEG_ACKED, false);
}

static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	dec_rs_pending(device);

	if (get_ldev_if_state(device, D_FAILED)) {
		drbd_rs_complete_io(device, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(device, sector, size);
			break;
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(device);
	}

	return 0;
}

static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_peer_device *peer_device;
	int vnr;

	tl_release(connection, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		if (device->state.conn == C_AHEAD &&
		    atomic_read(&device->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
			device->start_resync_timer.expires = jiffies + HZ;
			add_timer(&device->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
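/*
 * Online-verify result handling, for reference: the peer tags each
 * verified block via block_id (ID_OUT_OF_SYNC on mismatch).  ov_left is
 * counted in bitmap-block (BM_BLOCK_SIZE, 4 KiB) granularity, so the
 * 0x200 mask below fires every 512 blocks, i.e. every 2 MiB, matching
 * the "every other megabyte" comment.  When ov_left hits zero, the final
 * bookkeeping is handed off to the worker (w_ov_finished) rather than
 * done in the ack receiver itself.
 */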
static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct p_block_ack *p = pi->data;
	struct drbd_device_work *dw;
	sector_t sector;
	int size;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(device, sector, size);
	else
		ov_out_of_sync_print(device);

	if (!get_ldev(device))
		return 0;

	drbd_rs_complete_io(device, sector);
	dec_rs_pending(device);

	--device->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((device->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(device, device->ov_left);

	if (device->ov_left == 0) {
		dw = kmalloc(sizeof(*dw), GFP_NOIO);
		if (dw) {
			dw->w.cb = w_ov_finished;
			dw->device = device;
			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
		} else {
			drbd_err(device, "kmalloc(dw) failed.\n");
			ov_out_of_sync_print(device);
			drbd_resync_finished(device);
		}
	}
	put_ldev(device);
	return 0;
}

static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{
	return 0;
}

struct meta_sock_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_connection *connection, struct packet_info *);
};

static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
{
	long t;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	t = ping_timeout ? nc->ping_timeo : nc->ping_int;
	rcu_read_unlock();

	t *= HZ;
	if (ping_timeout)
		t /= 10;

	connection->meta.socket->sk->sk_rcvtimeo = t;
}

static void set_ping_timeout(struct drbd_connection *connection)
{
	set_rcvtimeo(connection, true);
}

static void set_idle_timeout(struct drbd_connection *connection)
{
	set_rcvtimeo(connection, false);
}

static struct meta_sock_cmd ack_receiver_tbl[] = {
	[P_PING]	      = { 0, got_Ping },
	[P_PING_ACK]	      = { 0, got_PingAck },
	[P_RECV_ACK]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_SUPERSEDED]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	      = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	      = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]     = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	      = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	      = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY]   = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]     = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	      = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	      = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	      = { sizeof(struct p_block_ack), got_BlockAck },
};

int drbd_ack_receiver(struct drbd_thread *thi)
{
	struct drbd_connection *connection = thi->connection;
	struct meta_sock_cmd *cmd = NULL;
	struct packet_info pi;
	unsigned long pre_recv_jif;
	int rv;
	void *buf = connection->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(connection);
	int expect = header_size;
	bool ping_timeout_active = false;

	sched_set_fifo_low(current);
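	/*
	 * Receive state machine, for reference: "expect" starts at the
	 * header size; once a full header is in, decode_header() selects
	 * the ack_receiver_tbl[] entry and "expect" grows by that command's
	 * payload size.  When "received" catches up, the handler runs and
	 * the state resets for the next packet.  Short reads simply loop.
	 */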
	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		conn_reclaim_net_peer_reqs(connection);

		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
			if (drbd_send_ping(connection)) {
				drbd_err(connection, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			set_ping_timeout(connection);
			ping_timeout_active = true;
		}

		pre_recv_jif = jiffies;
		rv = drbd_recv_short(connection->meta.socket, buf, expect - received, 0);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			if (test_bit(DISCONNECT_SENT, &connection->flags)) {
				long t;
				rcu_read_lock();
				t = rcu_dereference(connection->net_conf)->ping_timeo * HZ / 10;
				rcu_read_unlock();

				t = wait_event_timeout(connection->ping_wait,
						       connection->cstate < C_WF_REPORT_PARAMS,
						       t);
				if (t)
					break;
			}
			drbd_err(connection, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(connection->last_received, pre_recv_jif))
				continue;
			if (ping_timeout_active) {
				drbd_err(connection, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &connection->flags);
			continue;
		} else if (rv == -EINTR) {
			/* maybe drbd_thread_stop(): the while condition will notice.
			 * maybe woken for send_ping: we'll send a ping above,
			 * and change the rcvtimeo */
			flush_signals(current);
			continue;
		} else {
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(connection, connection->meta.rbuf, &pi))
				goto reconnect;
			/* check bounds before indexing the table */
			if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) ||
			    !ack_receiver_tbl[pi.cmd].fn) {
				drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			cmd = &ack_receiver_tbl[pi.cmd];
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			int err;

			err = cmd->fn(connection, &pi);
			if (err) {
				drbd_err(connection, "%ps failed\n", cmd->fn);
				goto reconnect;
			}

			connection->last_received = jiffies;

			if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
				set_idle_timeout(connection);
				ping_timeout_active = false;
			}

			buf = connection->meta.rbuf;
			received = 0;
			expect = header_size;
			cmd = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		conn_md_sync(connection);
	}
	if (0) {
disconnect:
		conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	drbd_info(connection, "ack_receiver terminated\n");

	return 0;
}
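/*
 * drbd_send_acks_wf() below brackets a batch of acks in TCP_CORK so the
 * stack coalesces many small ack packets into fewer segments:
 *
 *	tcp_sock_set_cork(sk, true);	- hold back partial frames
 *	...send all currently pending acks...
 *	tcp_sock_set_cork(sk, false);	- flush whatever is buffered
 *
 * Whether to cork at all is governed by the tcp_cork setting in net_conf.
 */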
void drbd_send_acks_wf(struct work_struct *ws)
{
	struct drbd_peer_device *peer_device =
		container_of(ws, struct drbd_peer_device, send_acks_work);
	struct drbd_connection *connection = peer_device->connection;
	struct drbd_device *device = peer_device->device;
	struct net_conf *nc;
	int tcp_cork, err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	tcp_cork = nc->tcp_cork;
	rcu_read_unlock();

	if (tcp_cork)
		tcp_sock_set_cork(connection->meta.socket->sk, true);

	err = drbd_finish_peer_reqs(device);
	kref_put(&device->kref, drbd_destroy_device);
	/* get is in drbd_endio_write_sec_final(). That is necessary to keep the
	 * struct work_struct send_acks_work alive, which is in the peer_device object */

	if (err) {
		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		return;
	}

	if (tcp_cork)
		tcp_sock_set_cork(connection->meta.socket->sk, false);
}