// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever else it needs for inter-node communication.
 *
 * The comms level consists of two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
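 *
 * For example, with configfs mounted (typically at /sys/kernel/config) a
 * cluster manager would normally write the value on every node before any
 * lockspace is created, e.g.:
 *
 *   echo 0 > /sys/kernel/config/dlm/cluster/protocol	# TCP (default)
 *   echo 1 > /sys/kernel/config/dlm/cluster/protocol	# SCTP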
42 * 43 */ 44 45 #include <asm/ioctls.h> 46 #include <net/sock.h> 47 #include <net/tcp.h> 48 #include <linux/pagemap.h> 49 #include <linux/file.h> 50 #include <linux/mutex.h> 51 #include <linux/sctp.h> 52 #include <linux/slab.h> 53 #include <net/sctp/sctp.h> 54 #include <net/ipv6.h> 55 56 #include <trace/events/dlm.h> 57 58 #include "dlm_internal.h" 59 #include "lowcomms.h" 60 #include "midcomms.h" 61 #include "memory.h" 62 #include "config.h" 63 64 #define NEEDED_RMEM (4*1024*1024) 65 66 /* Number of messages to send before rescheduling */ 67 #define MAX_SEND_MSG_COUNT 25 68 69 struct connection { 70 struct socket *sock; /* NULL if not connected */ 71 uint32_t nodeid; /* So we know who we are in the list */ 72 struct mutex sock_mutex; 73 unsigned long flags; 74 #define CF_READ_PENDING 1 75 #define CF_WRITE_PENDING 2 76 #define CF_INIT_PENDING 4 77 #define CF_IS_OTHERCON 5 78 #define CF_CLOSE 6 79 #define CF_APP_LIMITED 7 80 #define CF_CLOSING 8 81 #define CF_CONNECTED 9 82 #define CF_RECONNECT 10 83 #define CF_DELAY_CONNECT 11 84 struct list_head writequeue; /* List of outgoing writequeue_entries */ 85 spinlock_t writequeue_lock; 86 int retries; 87 #define MAX_CONNECT_RETRIES 3 88 struct hlist_node list; 89 struct connection *othercon; 90 struct connection *sendcon; 91 struct work_struct rwork; /* Receive workqueue */ 92 struct work_struct swork; /* Send workqueue */ 93 unsigned char *rx_buf; 94 int rx_buflen; 95 int rx_leftover; 96 struct rcu_head rcu; 97 }; 98 #define sock2con(x) ((struct connection *)(x)->sk_user_data) 99 100 struct listen_connection { 101 struct socket *sock; 102 struct work_struct rwork; 103 }; 104 105 #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end) 106 #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset) 107 108 /* An entry waiting to be sent */ 109 struct writequeue_entry { 110 struct list_head list; 111 struct page *page; 112 int offset; 113 int len; 114 int end; 115 int users; 116 bool dirty; 117 struct connection *con; 118 struct list_head msgs; 119 struct kref ref; 120 }; 121 122 struct dlm_msg { 123 struct writequeue_entry *entry; 124 struct dlm_msg *orig_msg; 125 bool retransmit; 126 void *ppc; 127 int len; 128 int idx; /* new()/commit() idx exchange */ 129 130 struct list_head list; 131 struct kref ref; 132 }; 133 134 struct dlm_node_addr { 135 struct list_head list; 136 int nodeid; 137 int mark; 138 int addr_count; 139 int curr_addr_index; 140 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; 141 }; 142 143 struct dlm_proto_ops { 144 bool try_new_addr; 145 const char *name; 146 int proto; 147 148 int (*connect)(struct connection *con, struct socket *sock, 149 struct sockaddr *addr, int addr_len); 150 void (*sockopts)(struct socket *sock); 151 int (*bind)(struct socket *sock); 152 int (*listen_validate)(void); 153 void (*listen_sockopts)(struct socket *sock); 154 int (*listen_bind)(struct socket *sock); 155 }; 156 157 static struct listen_sock_callbacks { 158 void (*sk_error_report)(struct sock *); 159 void (*sk_data_ready)(struct sock *); 160 void (*sk_state_change)(struct sock *); 161 void (*sk_write_space)(struct sock *); 162 } listen_sock; 163 164 static LIST_HEAD(dlm_node_addrs); 165 static DEFINE_SPINLOCK(dlm_node_addrs_spin); 166 167 static struct listen_connection listen_con; 168 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT]; 169 static int dlm_local_count; 170 171 /* Work queues */ 172 static struct workqueue_struct *recv_workqueue; 173 static struct workqueue_struct *send_workqueue; 174 175 static struct hlist_head 
connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
DEFINE_STATIC_SRCU(connections_srcu);

static const struct dlm_proto_ops *dlm_proto_ops;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);

bool dlm_lowcomms_is_running(void)
{
	return !!listen_con.sock;
}

static void writequeue_entry_ctor(void *data)
{
	struct writequeue_entry *entry = data;

	INIT_LIST_HEAD(&entry->msgs);
}

struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
{
	return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
				 0, 0, writequeue_entry_ctor);
}

struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
{
	return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
}

/* must be called while holding writequeue_lock */
static struct writequeue_entry *con_next_wq(struct connection *con)
{
	struct writequeue_entry *e;

	e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
				     list);
	/* if len is zero nothing is to send; if there are users still filling
	 * buffers we wait until they are done so we can send more.
	 */
	if (!e || e->users || e->len == 0)
		return NULL;

	return e;
}

static struct connection *__find_con(int nodeid, int r)
{
	struct connection *con;

	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}

	return NULL;
}

static int dlm_con_init(struct connection *con, int nodeid)
{
	con->rx_buflen = dlm_config.ci_buffer_size;
	con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
	if (!con->rx_buf)
		return -ENOMEM;

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	return 0;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con, *tmp;
	int r, ret;

	r = nodeid_hash(nodeid);
	con = __find_con(nodeid, r);
	if (con || !alloc)
		return con;

	con = kzalloc(sizeof(*con), alloc);
	if (!con)
		return NULL;

	ret = dlm_con_init(con, nodeid);
	if (ret) {
		kfree(con);
		return NULL;
	}

	spin_lock(&connections_lock);
	/* Because multiple workqueues/threads call this function it can
	 * race on multiple CPUs. Instead of locking the hot path __find_con()
	 * we just re-check for recently added nodes again under protection
	 * of connections_lock. If another CPU won the race we abort our
	 * connection creation and return the existing connection.
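	 * (Lookups run under connections_srcu only; connections_lock is taken
	 * just for this re-check and for updating the connection_hash lists.)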
282 */ 283 tmp = __find_con(nodeid, r); 284 if (tmp) { 285 spin_unlock(&connections_lock); 286 kfree(con->rx_buf); 287 kfree(con); 288 return tmp; 289 } 290 291 hlist_add_head_rcu(&con->list, &connection_hash[r]); 292 spin_unlock(&connections_lock); 293 294 return con; 295 } 296 297 /* Loop round all connections */ 298 static void foreach_conn(void (*conn_func)(struct connection *c)) 299 { 300 int i; 301 struct connection *con; 302 303 for (i = 0; i < CONN_HASH_SIZE; i++) { 304 hlist_for_each_entry_rcu(con, &connection_hash[i], list) 305 conn_func(con); 306 } 307 } 308 309 static struct dlm_node_addr *find_node_addr(int nodeid) 310 { 311 struct dlm_node_addr *na; 312 313 list_for_each_entry(na, &dlm_node_addrs, list) { 314 if (na->nodeid == nodeid) 315 return na; 316 } 317 return NULL; 318 } 319 320 static int addr_compare(const struct sockaddr_storage *x, 321 const struct sockaddr_storage *y) 322 { 323 switch (x->ss_family) { 324 case AF_INET: { 325 struct sockaddr_in *sinx = (struct sockaddr_in *)x; 326 struct sockaddr_in *siny = (struct sockaddr_in *)y; 327 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) 328 return 0; 329 if (sinx->sin_port != siny->sin_port) 330 return 0; 331 break; 332 } 333 case AF_INET6: { 334 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; 335 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; 336 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) 337 return 0; 338 if (sinx->sin6_port != siny->sin6_port) 339 return 0; 340 break; 341 } 342 default: 343 return 0; 344 } 345 return 1; 346 } 347 348 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, 349 struct sockaddr *sa_out, bool try_new_addr, 350 unsigned int *mark) 351 { 352 struct sockaddr_storage sas; 353 struct dlm_node_addr *na; 354 355 if (!dlm_local_count) 356 return -1; 357 358 spin_lock(&dlm_node_addrs_spin); 359 na = find_node_addr(nodeid); 360 if (na && na->addr_count) { 361 memcpy(&sas, na->addr[na->curr_addr_index], 362 sizeof(struct sockaddr_storage)); 363 364 if (try_new_addr) { 365 na->curr_addr_index++; 366 if (na->curr_addr_index == na->addr_count) 367 na->curr_addr_index = 0; 368 } 369 } 370 spin_unlock(&dlm_node_addrs_spin); 371 372 if (!na) 373 return -EEXIST; 374 375 if (!na->addr_count) 376 return -ENOENT; 377 378 *mark = na->mark; 379 380 if (sas_out) 381 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage)); 382 383 if (!sa_out) 384 return 0; 385 386 if (dlm_local_addr[0]->ss_family == AF_INET) { 387 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas; 388 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out; 389 ret4->sin_addr.s_addr = in4->sin_addr.s_addr; 390 } else { 391 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas; 392 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out; 393 ret6->sin6_addr = in6->sin6_addr; 394 } 395 396 return 0; 397 } 398 399 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid, 400 unsigned int *mark) 401 { 402 struct dlm_node_addr *na; 403 int rv = -EEXIST; 404 int addr_i; 405 406 spin_lock(&dlm_node_addrs_spin); 407 list_for_each_entry(na, &dlm_node_addrs, list) { 408 if (!na->addr_count) 409 continue; 410 411 for (addr_i = 0; addr_i < na->addr_count; addr_i++) { 412 if (addr_compare(na->addr[addr_i], addr)) { 413 *nodeid = na->nodeid; 414 *mark = na->mark; 415 rv = 0; 416 goto unlock; 417 } 418 } 419 } 420 unlock: 421 spin_unlock(&dlm_node_addrs_spin); 422 return rv; 423 } 424 425 /* caller need to held dlm_node_addrs_spin lock */ 426 static bool dlm_lowcomms_na_has_addr(const 
struct dlm_node_addr *na, 427 const struct sockaddr_storage *addr) 428 { 429 int i; 430 431 for (i = 0; i < na->addr_count; i++) { 432 if (addr_compare(na->addr[i], addr)) 433 return true; 434 } 435 436 return false; 437 } 438 439 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) 440 { 441 struct sockaddr_storage *new_addr; 442 struct dlm_node_addr *new_node, *na; 443 bool ret; 444 445 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS); 446 if (!new_node) 447 return -ENOMEM; 448 449 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS); 450 if (!new_addr) { 451 kfree(new_node); 452 return -ENOMEM; 453 } 454 455 memcpy(new_addr, addr, len); 456 457 spin_lock(&dlm_node_addrs_spin); 458 na = find_node_addr(nodeid); 459 if (!na) { 460 new_node->nodeid = nodeid; 461 new_node->addr[0] = new_addr; 462 new_node->addr_count = 1; 463 new_node->mark = dlm_config.ci_mark; 464 list_add(&new_node->list, &dlm_node_addrs); 465 spin_unlock(&dlm_node_addrs_spin); 466 return 0; 467 } 468 469 ret = dlm_lowcomms_na_has_addr(na, addr); 470 if (ret) { 471 spin_unlock(&dlm_node_addrs_spin); 472 kfree(new_addr); 473 kfree(new_node); 474 return -EEXIST; 475 } 476 477 if (na->addr_count >= DLM_MAX_ADDR_COUNT) { 478 spin_unlock(&dlm_node_addrs_spin); 479 kfree(new_addr); 480 kfree(new_node); 481 return -ENOSPC; 482 } 483 484 na->addr[na->addr_count++] = new_addr; 485 spin_unlock(&dlm_node_addrs_spin); 486 kfree(new_node); 487 return 0; 488 } 489 490 /* Data available on socket or listen socket received a connect */ 491 static void lowcomms_data_ready(struct sock *sk) 492 { 493 struct connection *con; 494 495 con = sock2con(sk); 496 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags)) 497 queue_work(recv_workqueue, &con->rwork); 498 } 499 500 static void lowcomms_listen_data_ready(struct sock *sk) 501 { 502 queue_work(recv_workqueue, &listen_con.rwork); 503 } 504 505 static void lowcomms_write_space(struct sock *sk) 506 { 507 struct connection *con; 508 509 con = sock2con(sk); 510 if (!con) 511 return; 512 513 if (!test_and_set_bit(CF_CONNECTED, &con->flags)) { 514 log_print("connected to node %d", con->nodeid); 515 queue_work(send_workqueue, &con->swork); 516 return; 517 } 518 519 clear_bit(SOCK_NOSPACE, &con->sock->flags); 520 521 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { 522 con->sock->sk->sk_write_pending--; 523 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); 524 } 525 526 queue_work(send_workqueue, &con->swork); 527 } 528 529 static inline void lowcomms_connect_sock(struct connection *con) 530 { 531 if (test_bit(CF_CLOSE, &con->flags)) 532 return; 533 queue_work(send_workqueue, &con->swork); 534 cond_resched(); 535 } 536 537 static void lowcomms_state_change(struct sock *sk) 538 { 539 /* SCTP layer is not calling sk_data_ready when the connection 540 * is done, so we catch the signal through here. Also, it 541 * doesn't switch socket state when entering shutdown, so we 542 * skip the write in that case. 
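	 * (On RCV_SHUTDOWN we queue the receive work so that
	 * receive_from_sock() can observe the EOF and close the connection.)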
543 */ 544 if (sk->sk_shutdown) { 545 if (sk->sk_shutdown == RCV_SHUTDOWN) 546 lowcomms_data_ready(sk); 547 } else if (sk->sk_state == TCP_ESTABLISHED) { 548 lowcomms_write_space(sk); 549 } 550 } 551 552 int dlm_lowcomms_connect_node(int nodeid) 553 { 554 struct connection *con; 555 int idx; 556 557 if (nodeid == dlm_our_nodeid()) 558 return 0; 559 560 idx = srcu_read_lock(&connections_srcu); 561 con = nodeid2con(nodeid, GFP_NOFS); 562 if (!con) { 563 srcu_read_unlock(&connections_srcu, idx); 564 return -ENOMEM; 565 } 566 567 lowcomms_connect_sock(con); 568 srcu_read_unlock(&connections_srcu, idx); 569 570 return 0; 571 } 572 573 int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark) 574 { 575 struct dlm_node_addr *na; 576 577 spin_lock(&dlm_node_addrs_spin); 578 na = find_node_addr(nodeid); 579 if (!na) { 580 spin_unlock(&dlm_node_addrs_spin); 581 return -ENOENT; 582 } 583 584 na->mark = mark; 585 spin_unlock(&dlm_node_addrs_spin); 586 587 return 0; 588 } 589 590 static void lowcomms_error_report(struct sock *sk) 591 { 592 struct connection *con; 593 void (*orig_report)(struct sock *) = NULL; 594 struct inet_sock *inet; 595 596 con = sock2con(sk); 597 if (con == NULL) 598 goto out; 599 600 orig_report = listen_sock.sk_error_report; 601 602 inet = inet_sk(sk); 603 switch (sk->sk_family) { 604 case AF_INET: 605 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " 606 "sending to node %d at %pI4, dport %d, " 607 "sk_err=%d/%d\n", dlm_our_nodeid(), 608 con->nodeid, &inet->inet_daddr, 609 ntohs(inet->inet_dport), sk->sk_err, 610 sk->sk_err_soft); 611 break; 612 #if IS_ENABLED(CONFIG_IPV6) 613 case AF_INET6: 614 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " 615 "sending to node %d at %pI6c, " 616 "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), 617 con->nodeid, &sk->sk_v6_daddr, 618 ntohs(inet->inet_dport), sk->sk_err, 619 sk->sk_err_soft); 620 break; 621 #endif 622 default: 623 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " 624 "invalid socket family %d set, " 625 "sk_err=%d/%d\n", dlm_our_nodeid(), 626 sk->sk_family, sk->sk_err, sk->sk_err_soft); 627 goto out; 628 } 629 630 /* below sendcon only handling */ 631 if (test_bit(CF_IS_OTHERCON, &con->flags)) 632 con = con->sendcon; 633 634 switch (sk->sk_err) { 635 case ECONNREFUSED: 636 set_bit(CF_DELAY_CONNECT, &con->flags); 637 break; 638 default: 639 break; 640 } 641 642 if (!test_and_set_bit(CF_RECONNECT, &con->flags)) 643 queue_work(send_workqueue, &con->swork); 644 645 out: 646 if (orig_report) 647 orig_report(sk); 648 } 649 650 /* Note: sk_callback_lock must be locked before calling this function. 
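 * The saved callbacks are put back in place by restore_callbacks() when the
 * socket is closed again (see dlm_close_sock()).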
*/ 651 static void save_listen_callbacks(struct socket *sock) 652 { 653 struct sock *sk = sock->sk; 654 655 listen_sock.sk_data_ready = sk->sk_data_ready; 656 listen_sock.sk_state_change = sk->sk_state_change; 657 listen_sock.sk_write_space = sk->sk_write_space; 658 listen_sock.sk_error_report = sk->sk_error_report; 659 } 660 661 static void restore_callbacks(struct socket *sock) 662 { 663 struct sock *sk = sock->sk; 664 665 lock_sock(sk); 666 sk->sk_user_data = NULL; 667 sk->sk_data_ready = listen_sock.sk_data_ready; 668 sk->sk_state_change = listen_sock.sk_state_change; 669 sk->sk_write_space = listen_sock.sk_write_space; 670 sk->sk_error_report = listen_sock.sk_error_report; 671 release_sock(sk); 672 } 673 674 static void add_listen_sock(struct socket *sock, struct listen_connection *con) 675 { 676 struct sock *sk = sock->sk; 677 678 lock_sock(sk); 679 save_listen_callbacks(sock); 680 con->sock = sock; 681 682 sk->sk_user_data = con; 683 sk->sk_allocation = GFP_NOFS; 684 /* Install a data_ready callback */ 685 sk->sk_data_ready = lowcomms_listen_data_ready; 686 release_sock(sk); 687 } 688 689 /* Make a socket active */ 690 static void add_sock(struct socket *sock, struct connection *con) 691 { 692 struct sock *sk = sock->sk; 693 694 lock_sock(sk); 695 con->sock = sock; 696 697 sk->sk_user_data = con; 698 /* Install a data_ready callback */ 699 sk->sk_data_ready = lowcomms_data_ready; 700 sk->sk_write_space = lowcomms_write_space; 701 sk->sk_state_change = lowcomms_state_change; 702 sk->sk_allocation = GFP_NOFS; 703 sk->sk_error_report = lowcomms_error_report; 704 release_sock(sk); 705 } 706 707 /* Add the port number to an IPv6 or 4 sockaddr and return the address 708 length */ 709 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port, 710 int *addr_len) 711 { 712 saddr->ss_family = dlm_local_addr[0]->ss_family; 713 if (saddr->ss_family == AF_INET) { 714 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr; 715 in4_addr->sin_port = cpu_to_be16(port); 716 *addr_len = sizeof(struct sockaddr_in); 717 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); 718 } else { 719 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; 720 in6_addr->sin6_port = cpu_to_be16(port); 721 *addr_len = sizeof(struct sockaddr_in6); 722 } 723 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len); 724 } 725 726 static void dlm_page_release(struct kref *kref) 727 { 728 struct writequeue_entry *e = container_of(kref, struct writequeue_entry, 729 ref); 730 731 __free_page(e->page); 732 dlm_free_writequeue(e); 733 } 734 735 static void dlm_msg_release(struct kref *kref) 736 { 737 struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref); 738 739 kref_put(&msg->entry->ref, dlm_page_release); 740 dlm_free_msg(msg); 741 } 742 743 static void free_entry(struct writequeue_entry *e) 744 { 745 struct dlm_msg *msg, *tmp; 746 747 list_for_each_entry_safe(msg, tmp, &e->msgs, list) { 748 if (msg->orig_msg) { 749 msg->orig_msg->retransmit = false; 750 kref_put(&msg->orig_msg->ref, dlm_msg_release); 751 } 752 753 list_del(&msg->list); 754 kref_put(&msg->ref, dlm_msg_release); 755 } 756 757 list_del(&e->list); 758 kref_put(&e->ref, dlm_page_release); 759 } 760 761 static void dlm_close_sock(struct socket **sock) 762 { 763 if (*sock) { 764 restore_callbacks(*sock); 765 sock_release(*sock); 766 *sock = NULL; 767 } 768 } 769 770 /* Close a remote connection and tidy up */ 771 static void close_connection(struct connection *con, bool and_other, 772 bool tx, bool 
rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
	struct writequeue_entry *e;

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	dlm_close_sock(&con->sock);

	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, tx, rx);
	}

	/* If a writequeue entry was only partially sent, drop the whole
	 * entry on reconnection so that we never resume in the middle of a
	 * message, which would confuse the other end.
	 *
	 * We can always drop messages because they will be retransmitted,
	 * but what we cannot allow is to transmit half a message which may
	 * already have been processed at the other side.
	 *
	 * Our policy is to start from a clean state on disconnect; we don't
	 * know what has been sent/received on the transport layer in this
	 * case.
	 */
	spin_lock(&con->writequeue_lock);
	if (!list_empty(&con->writequeue)) {
		e = list_first_entry(&con->writequeue, struct writequeue_entry,
				     list);
		if (e->dirty)
			free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);

	con->rx_leftover = 0;
	con->retries = 0;
	clear_bit(CF_APP_LIMITED, &con->flags);
	clear_bit(CF_CONNECTED, &con->flags);
	clear_bit(CF_DELAY_CONNECT, &con->flags);
	clear_bit(CF_RECONNECT, &con->flags);
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

static int con_realloc_receive_buf(struct connection *con, int newlen)
{
	unsigned char *newbuf;

	newbuf = kmalloc(newlen, GFP_NOFS);
	if (!newbuf)
		return -ENOMEM;

	/* copy any leftover from the last receive */
	if (con->rx_leftover)
		memmove(newbuf, con->rx_buf, con->rx_leftover);

	/* swap to new buffer space */
	kfree(con->rx_buf);
	con->rx_buflen = newlen;
	con->rx_buf = newbuf;

	return 0;
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	struct msghdr msg;
	struct kvec iov;
	int ret, buflen;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	/* realloc if the configured receive buffer size has changed */
	buflen = dlm_config.ci_buffer_size;
	if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
		ret = con_realloc_receive_buf(con, buflen);
		if (ret < 0)
			goto out_resched;
	}

	for (;;) {
		/* calculate new buffer parameters regarding last receive and
		 * possible leftover bytes
		 */
		iov.iov_base = con->rx_buf + con->rx_leftover;
		iov.iov_len = con->rx_buflen - con->rx_leftover;

		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
		ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
				     msg.msg_flags);
		trace_dlm_recv(con->nodeid, ret);
		if (ret == -EAGAIN)
			break;
		else if (ret <= 0)
			goto out_close;

		/* new buflen according to the bytes just read plus the
		 * leftover from the last receive
		 */
		buflen = ret + con->rx_leftover;
		ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
		if (ret < 0)
			goto out_close;

		/* calculate leftover bytes from process and put it into begin of
		 * the receive
buffer, so next receive we have the full message 891 * at the start address of the receive buffer. 892 */ 893 con->rx_leftover = buflen - ret; 894 if (con->rx_leftover) { 895 memmove(con->rx_buf, con->rx_buf + ret, 896 con->rx_leftover); 897 } 898 } 899 900 dlm_midcomms_receive_done(con->nodeid); 901 mutex_unlock(&con->sock_mutex); 902 return 0; 903 904 out_resched: 905 if (!test_and_set_bit(CF_READ_PENDING, &con->flags)) 906 queue_work(recv_workqueue, &con->rwork); 907 mutex_unlock(&con->sock_mutex); 908 return -EAGAIN; 909 910 out_close: 911 if (ret == 0) { 912 log_print("connection %p got EOF from %d", 913 con, con->nodeid); 914 915 mutex_unlock(&con->sock_mutex); 916 close_connection(con, false, true, false); 917 /* signal to breaking receive worker */ 918 ret = -1; 919 } else { 920 mutex_unlock(&con->sock_mutex); 921 } 922 return ret; 923 } 924 925 /* Listening socket is busy, accept a connection */ 926 static int accept_from_sock(struct listen_connection *con) 927 { 928 int result; 929 struct sockaddr_storage peeraddr; 930 struct socket *newsock; 931 int len, idx; 932 int nodeid; 933 struct connection *newcon; 934 struct connection *addcon; 935 unsigned int mark; 936 937 if (!con->sock) 938 return -ENOTCONN; 939 940 result = kernel_accept(con->sock, &newsock, O_NONBLOCK); 941 if (result < 0) 942 goto accept_err; 943 944 /* Get the connected socket's peer */ 945 memset(&peeraddr, 0, sizeof(peeraddr)); 946 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2); 947 if (len < 0) { 948 result = -ECONNABORTED; 949 goto accept_err; 950 } 951 952 /* Get the new node's NODEID */ 953 make_sockaddr(&peeraddr, 0, &len); 954 if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) { 955 switch (peeraddr.ss_family) { 956 case AF_INET: { 957 struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr; 958 959 log_print("connect from non cluster IPv4 node %pI4", 960 &sin->sin_addr); 961 break; 962 } 963 #if IS_ENABLED(CONFIG_IPV6) 964 case AF_INET6: { 965 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr; 966 967 log_print("connect from non cluster IPv6 node %pI6c", 968 &sin6->sin6_addr); 969 break; 970 } 971 #endif 972 default: 973 log_print("invalid family from non cluster node"); 974 break; 975 } 976 977 sock_release(newsock); 978 return -1; 979 } 980 981 log_print("got connection from %d", nodeid); 982 983 /* Check to see if we already have a connection to this node. This 984 * could happen if the two nodes initiate a connection at roughly 985 * the same time and the connections cross on the wire. 
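	 * (Each node then ends up with both an outgoing connect and an
	 * incoming accept for the same peer.)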
986 * In this case we store the incoming one in "othercon" 987 */ 988 idx = srcu_read_lock(&connections_srcu); 989 newcon = nodeid2con(nodeid, GFP_NOFS); 990 if (!newcon) { 991 srcu_read_unlock(&connections_srcu, idx); 992 result = -ENOMEM; 993 goto accept_err; 994 } 995 996 sock_set_mark(newsock->sk, mark); 997 998 mutex_lock(&newcon->sock_mutex); 999 if (newcon->sock) { 1000 struct connection *othercon = newcon->othercon; 1001 1002 if (!othercon) { 1003 othercon = kzalloc(sizeof(*othercon), GFP_NOFS); 1004 if (!othercon) { 1005 log_print("failed to allocate incoming socket"); 1006 mutex_unlock(&newcon->sock_mutex); 1007 srcu_read_unlock(&connections_srcu, idx); 1008 result = -ENOMEM; 1009 goto accept_err; 1010 } 1011 1012 result = dlm_con_init(othercon, nodeid); 1013 if (result < 0) { 1014 kfree(othercon); 1015 mutex_unlock(&newcon->sock_mutex); 1016 srcu_read_unlock(&connections_srcu, idx); 1017 goto accept_err; 1018 } 1019 1020 lockdep_set_subclass(&othercon->sock_mutex, 1); 1021 set_bit(CF_IS_OTHERCON, &othercon->flags); 1022 newcon->othercon = othercon; 1023 othercon->sendcon = newcon; 1024 } else { 1025 /* close other sock con if we have something new */ 1026 close_connection(othercon, false, true, false); 1027 } 1028 1029 mutex_lock(&othercon->sock_mutex); 1030 add_sock(newsock, othercon); 1031 addcon = othercon; 1032 mutex_unlock(&othercon->sock_mutex); 1033 } 1034 else { 1035 /* accept copies the sk after we've saved the callbacks, so we 1036 don't want to save them a second time or comm errors will 1037 result in calling sk_error_report recursively. */ 1038 add_sock(newsock, newcon); 1039 addcon = newcon; 1040 } 1041 1042 set_bit(CF_CONNECTED, &addcon->flags); 1043 mutex_unlock(&newcon->sock_mutex); 1044 1045 /* 1046 * Add it to the active queue in case we got data 1047 * between processing the accept adding the socket 1048 * to the read_sockets list 1049 */ 1050 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags)) 1051 queue_work(recv_workqueue, &addcon->rwork); 1052 1053 srcu_read_unlock(&connections_srcu, idx); 1054 1055 return 0; 1056 1057 accept_err: 1058 if (newsock) 1059 sock_release(newsock); 1060 1061 if (result != -EAGAIN) 1062 log_print("error accepting connection from node: %d", result); 1063 return result; 1064 } 1065 1066 /* 1067 * writequeue_entry_complete - try to delete and free write queue entry 1068 * @e: write queue entry to try to delete 1069 * @completed: bytes completed 1070 * 1071 * writequeue_lock must be held. 
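 *
 * The entry is only freed once all of its bytes have been sent (len reaches
 * zero) and no callers are still filling it (users == 0); otherwise it stays
 * on the writequeue marked dirty.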
1072 */ 1073 static void writequeue_entry_complete(struct writequeue_entry *e, int completed) 1074 { 1075 e->offset += completed; 1076 e->len -= completed; 1077 /* signal that page was half way transmitted */ 1078 e->dirty = true; 1079 1080 if (e->len == 0 && e->users == 0) 1081 free_entry(e); 1082 } 1083 1084 /* 1085 * sctp_bind_addrs - bind a SCTP socket to all our addresses 1086 */ 1087 static int sctp_bind_addrs(struct socket *sock, uint16_t port) 1088 { 1089 struct sockaddr_storage localaddr; 1090 struct sockaddr *addr = (struct sockaddr *)&localaddr; 1091 int i, addr_len, result = 0; 1092 1093 for (i = 0; i < dlm_local_count; i++) { 1094 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr)); 1095 make_sockaddr(&localaddr, port, &addr_len); 1096 1097 if (!i) 1098 result = kernel_bind(sock, addr, addr_len); 1099 else 1100 result = sock_bind_add(sock->sk, addr, addr_len); 1101 1102 if (result < 0) { 1103 log_print("Can't bind to %d addr number %d, %d.\n", 1104 port, i + 1, result); 1105 break; 1106 } 1107 } 1108 return result; 1109 } 1110 1111 /* Get local addresses */ 1112 static void init_local(void) 1113 { 1114 struct sockaddr_storage sas, *addr; 1115 int i; 1116 1117 dlm_local_count = 0; 1118 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { 1119 if (dlm_our_addr(&sas, i)) 1120 break; 1121 1122 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS); 1123 if (!addr) 1124 break; 1125 dlm_local_addr[dlm_local_count++] = addr; 1126 } 1127 } 1128 1129 static void deinit_local(void) 1130 { 1131 int i; 1132 1133 for (i = 0; i < dlm_local_count; i++) 1134 kfree(dlm_local_addr[i]); 1135 } 1136 1137 static struct writequeue_entry *new_writequeue_entry(struct connection *con) 1138 { 1139 struct writequeue_entry *entry; 1140 1141 entry = dlm_allocate_writequeue(); 1142 if (!entry) 1143 return NULL; 1144 1145 entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO); 1146 if (!entry->page) { 1147 dlm_free_writequeue(entry); 1148 return NULL; 1149 } 1150 1151 entry->offset = 0; 1152 entry->len = 0; 1153 entry->end = 0; 1154 entry->dirty = false; 1155 entry->con = con; 1156 entry->users = 1; 1157 kref_init(&entry->ref); 1158 return entry; 1159 } 1160 1161 static struct writequeue_entry *new_wq_entry(struct connection *con, int len, 1162 char **ppc, void (*cb)(void *data), 1163 void *data) 1164 { 1165 struct writequeue_entry *e; 1166 1167 spin_lock(&con->writequeue_lock); 1168 if (!list_empty(&con->writequeue)) { 1169 e = list_last_entry(&con->writequeue, struct writequeue_entry, list); 1170 if (DLM_WQ_REMAIN_BYTES(e) >= len) { 1171 kref_get(&e->ref); 1172 1173 *ppc = page_address(e->page) + e->end; 1174 if (cb) 1175 cb(data); 1176 1177 e->end += len; 1178 e->users++; 1179 goto out; 1180 } 1181 } 1182 1183 e = new_writequeue_entry(con); 1184 if (!e) 1185 goto out; 1186 1187 kref_get(&e->ref); 1188 *ppc = page_address(e->page); 1189 e->end += len; 1190 if (cb) 1191 cb(data); 1192 1193 list_add_tail(&e->list, &con->writequeue); 1194 1195 out: 1196 spin_unlock(&con->writequeue_lock); 1197 return e; 1198 }; 1199 1200 static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, 1201 gfp_t allocation, char **ppc, 1202 void (*cb)(void *data), 1203 void *data) 1204 { 1205 struct writequeue_entry *e; 1206 struct dlm_msg *msg; 1207 1208 msg = dlm_allocate_msg(allocation); 1209 if (!msg) 1210 return NULL; 1211 1212 kref_init(&msg->ref); 1213 1214 e = new_wq_entry(con, len, ppc, cb, data); 1215 if (!e) { 1216 dlm_free_msg(msg); 1217 return NULL; 1218 } 1219 1220 msg->retransmit = false; 1221 msg->orig_msg = 
NULL;
	msg->ppc = *ppc;
	msg->len = len;
	msg->entry = e;

	return msg;
}

/* avoid a sparse false positive for connections_srcu; the unlock happens
 * in dlm_lowcomms_commit_msg(), which must be called if this succeeds
 */
#ifndef __CHECKER__
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
				     char **ppc, void (*cb)(void *data),
				     void *data)
{
	struct connection *con;
	struct dlm_msg *msg;
	int idx;

	if (len > DLM_MAX_SOCKET_BUFSIZE ||
	    len < sizeof(struct dlm_header)) {
		BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
		log_print("failed to allocate a buffer of size %d", len);
		WARN_ON(1);
		return NULL;
	}

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, allocation);
	if (!con) {
		srcu_read_unlock(&connections_srcu, idx);
		return NULL;
	}

	msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
	if (!msg) {
		srcu_read_unlock(&connections_srcu, idx);
		return NULL;
	}

	/* for dlm_lowcomms_commit_msg() */
	kref_get(&msg->ref);
	/* we assume that on success dlm_lowcomms_commit_msg() will be called */
	msg->idx = idx;
	return msg;
}
#endif

static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
	struct writequeue_entry *e = msg->entry;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	kref_get(&msg->ref);
	list_add(&msg->list, &e->msgs);

	users = --e->users;
	if (users)
		goto out;

	e->len = DLM_WQ_LENGTH_BYTES(e);
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}

/* avoid a sparse false positive for connections_srcu; the lock was taken
 * in dlm_lowcomms_new_msg()
 */
#ifndef __CHECKER__
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
	_dlm_lowcomms_commit_msg(msg);
	srcu_read_unlock(&connections_srcu, msg->idx);
	/* drop the reference taken in dlm_lowcomms_new_msg() */
	kref_put(&msg->ref, dlm_msg_release);
}
#endif

void dlm_lowcomms_put_msg(struct dlm_msg *msg)
{
	kref_put(&msg->ref, dlm_msg_release);
}

/* does not hold connections_srcu; only used from workqueue context */
int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
{
	struct dlm_msg *msg_resend;
	char *ppc;

	if (msg->retransmit)
		return 1;

	msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
					      GFP_ATOMIC, &ppc, NULL, NULL);
	if (!msg_resend)
		return -ENOMEM;

	msg->retransmit = true;
	kref_get(&msg->ref);
	msg_resend->orig_msg = msg;

	memcpy(ppc, msg->ppc, msg->len);
	_dlm_lowcomms_commit_msg(msg_resend);
	dlm_lowcomms_put_msg(msg_resend);

	return 0;
}

/* Send a message */
static void send_to_sock(struct connection *con)
{
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset, ret;
	int count;

again:
	count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = con_next_wq(con);
		if (!e)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret =
kernel_sendpage(con->sock, e->page, offset, len, 1365 msg_flags); 1366 trace_dlm_send(con->nodeid, ret); 1367 if (ret == -EAGAIN || ret == 0) { 1368 if (ret == -EAGAIN && 1369 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && 1370 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { 1371 /* Notify TCP that we're limited by the 1372 * application window size. 1373 */ 1374 set_bit(SOCK_NOSPACE, &con->sock->flags); 1375 con->sock->sk->sk_write_pending++; 1376 } 1377 cond_resched(); 1378 goto out; 1379 } else if (ret < 0) 1380 goto out; 1381 1382 spin_lock(&con->writequeue_lock); 1383 writequeue_entry_complete(e, ret); 1384 1385 /* Don't starve people filling buffers */ 1386 if (++count >= MAX_SEND_MSG_COUNT) { 1387 spin_unlock(&con->writequeue_lock); 1388 mutex_unlock(&con->sock_mutex); 1389 cond_resched(); 1390 goto again; 1391 } 1392 } 1393 spin_unlock(&con->writequeue_lock); 1394 1395 out: 1396 mutex_unlock(&con->sock_mutex); 1397 return; 1398 1399 out_connect: 1400 mutex_unlock(&con->sock_mutex); 1401 queue_work(send_workqueue, &con->swork); 1402 cond_resched(); 1403 } 1404 1405 static void clean_one_writequeue(struct connection *con) 1406 { 1407 struct writequeue_entry *e, *safe; 1408 1409 spin_lock(&con->writequeue_lock); 1410 list_for_each_entry_safe(e, safe, &con->writequeue, list) { 1411 free_entry(e); 1412 } 1413 spin_unlock(&con->writequeue_lock); 1414 } 1415 1416 /* Called from recovery when it knows that a node has 1417 left the cluster */ 1418 int dlm_lowcomms_close(int nodeid) 1419 { 1420 struct connection *con; 1421 struct dlm_node_addr *na; 1422 int idx; 1423 1424 log_print("closing connection to node %d", nodeid); 1425 idx = srcu_read_lock(&connections_srcu); 1426 con = nodeid2con(nodeid, 0); 1427 if (con) { 1428 set_bit(CF_CLOSE, &con->flags); 1429 close_connection(con, true, true, true); 1430 clean_one_writequeue(con); 1431 if (con->othercon) 1432 clean_one_writequeue(con->othercon); 1433 } 1434 srcu_read_unlock(&connections_srcu, idx); 1435 1436 spin_lock(&dlm_node_addrs_spin); 1437 na = find_node_addr(nodeid); 1438 if (na) { 1439 list_del(&na->list); 1440 while (na->addr_count--) 1441 kfree(na->addr[na->addr_count]); 1442 kfree(na); 1443 } 1444 spin_unlock(&dlm_node_addrs_spin); 1445 1446 return 0; 1447 } 1448 1449 /* Receive workqueue function */ 1450 static void process_recv_sockets(struct work_struct *work) 1451 { 1452 struct connection *con = container_of(work, struct connection, rwork); 1453 1454 clear_bit(CF_READ_PENDING, &con->flags); 1455 receive_from_sock(con); 1456 } 1457 1458 static void process_listen_recv_socket(struct work_struct *work) 1459 { 1460 int ret; 1461 1462 do { 1463 ret = accept_from_sock(&listen_con); 1464 } while (!ret); 1465 } 1466 1467 static void dlm_connect(struct connection *con) 1468 { 1469 struct sockaddr_storage addr; 1470 int result, addr_len; 1471 struct socket *sock; 1472 unsigned int mark; 1473 1474 /* Some odd races can cause double-connects, ignore them */ 1475 if (con->retries++ > MAX_CONNECT_RETRIES) 1476 return; 1477 1478 if (con->sock) { 1479 log_print("node %d already connected.", con->nodeid); 1480 return; 1481 } 1482 1483 memset(&addr, 0, sizeof(addr)); 1484 result = nodeid_to_addr(con->nodeid, &addr, NULL, 1485 dlm_proto_ops->try_new_addr, &mark); 1486 if (result < 0) { 1487 log_print("no address for nodeid %d", con->nodeid); 1488 return; 1489 } 1490 1491 /* Create a socket to communicate with */ 1492 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, 1493 SOCK_STREAM, dlm_proto_ops->proto, &sock); 1494 if 
(result < 0) 1495 goto socket_err; 1496 1497 sock_set_mark(sock->sk, mark); 1498 dlm_proto_ops->sockopts(sock); 1499 1500 add_sock(sock, con); 1501 1502 result = dlm_proto_ops->bind(sock); 1503 if (result < 0) 1504 goto add_sock_err; 1505 1506 log_print_ratelimited("connecting to %d", con->nodeid); 1507 make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len); 1508 result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr, 1509 addr_len); 1510 if (result < 0) 1511 goto add_sock_err; 1512 1513 return; 1514 1515 add_sock_err: 1516 dlm_close_sock(&con->sock); 1517 1518 socket_err: 1519 /* 1520 * Some errors are fatal and this list might need adjusting. For other 1521 * errors we try again until the max number of retries is reached. 1522 */ 1523 if (result != -EHOSTUNREACH && 1524 result != -ENETUNREACH && 1525 result != -ENETDOWN && 1526 result != -EINVAL && 1527 result != -EPROTONOSUPPORT) { 1528 log_print("connect %d try %d error %d", con->nodeid, 1529 con->retries, result); 1530 msleep(1000); 1531 lowcomms_connect_sock(con); 1532 } 1533 } 1534 1535 /* Send workqueue function */ 1536 static void process_send_sockets(struct work_struct *work) 1537 { 1538 struct connection *con = container_of(work, struct connection, swork); 1539 1540 WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags)); 1541 1542 clear_bit(CF_WRITE_PENDING, &con->flags); 1543 1544 if (test_and_clear_bit(CF_RECONNECT, &con->flags)) { 1545 close_connection(con, false, false, true); 1546 dlm_midcomms_unack_msg_resend(con->nodeid); 1547 } 1548 1549 if (con->sock == NULL) { 1550 if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags)) 1551 msleep(1000); 1552 1553 mutex_lock(&con->sock_mutex); 1554 dlm_connect(con); 1555 mutex_unlock(&con->sock_mutex); 1556 } 1557 1558 if (!list_empty(&con->writequeue)) 1559 send_to_sock(con); 1560 } 1561 1562 static void work_stop(void) 1563 { 1564 if (recv_workqueue) { 1565 destroy_workqueue(recv_workqueue); 1566 recv_workqueue = NULL; 1567 } 1568 1569 if (send_workqueue) { 1570 destroy_workqueue(send_workqueue); 1571 send_workqueue = NULL; 1572 } 1573 } 1574 1575 static int work_start(void) 1576 { 1577 recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM); 1578 if (!recv_workqueue) { 1579 log_print("can't start dlm_recv"); 1580 return -ENOMEM; 1581 } 1582 1583 send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM); 1584 if (!send_workqueue) { 1585 log_print("can't start dlm_send"); 1586 destroy_workqueue(recv_workqueue); 1587 recv_workqueue = NULL; 1588 return -ENOMEM; 1589 } 1590 1591 return 0; 1592 } 1593 1594 void dlm_lowcomms_shutdown(void) 1595 { 1596 restore_callbacks(listen_con.sock); 1597 1598 if (recv_workqueue) 1599 flush_workqueue(recv_workqueue); 1600 if (send_workqueue) 1601 flush_workqueue(send_workqueue); 1602 1603 dlm_close_sock(&listen_con.sock); 1604 } 1605 1606 void dlm_lowcomms_shutdown_node(int nodeid, bool force) 1607 { 1608 struct connection *con; 1609 int idx; 1610 1611 idx = srcu_read_lock(&connections_srcu); 1612 con = nodeid2con(nodeid, 0); 1613 if (WARN_ON_ONCE(!con)) { 1614 srcu_read_unlock(&connections_srcu, idx); 1615 return; 1616 } 1617 1618 WARN_ON_ONCE(!force && !list_empty(&con->writequeue)); 1619 clean_one_writequeue(con); 1620 if (con->othercon) 1621 clean_one_writequeue(con->othercon); 1622 close_connection(con, true, true, true); 1623 srcu_read_unlock(&connections_srcu, idx); 1624 } 1625 1626 static void _stop_conn(struct connection *con, bool and_other) 1627 { 1628 mutex_lock(&con->sock_mutex); 1629 set_bit(CF_CLOSE, 
&con->flags); 1630 set_bit(CF_READ_PENDING, &con->flags); 1631 set_bit(CF_WRITE_PENDING, &con->flags); 1632 if (con->sock && con->sock->sk) { 1633 lock_sock(con->sock->sk); 1634 con->sock->sk->sk_user_data = NULL; 1635 release_sock(con->sock->sk); 1636 } 1637 if (con->othercon && and_other) 1638 _stop_conn(con->othercon, false); 1639 mutex_unlock(&con->sock_mutex); 1640 } 1641 1642 static void stop_conn(struct connection *con) 1643 { 1644 _stop_conn(con, true); 1645 } 1646 1647 static void connection_release(struct rcu_head *rcu) 1648 { 1649 struct connection *con = container_of(rcu, struct connection, rcu); 1650 1651 kfree(con->rx_buf); 1652 kfree(con); 1653 } 1654 1655 static void free_conn(struct connection *con) 1656 { 1657 close_connection(con, true, true, true); 1658 spin_lock(&connections_lock); 1659 hlist_del_rcu(&con->list); 1660 spin_unlock(&connections_lock); 1661 if (con->othercon) { 1662 clean_one_writequeue(con->othercon); 1663 call_srcu(&connections_srcu, &con->othercon->rcu, 1664 connection_release); 1665 } 1666 clean_one_writequeue(con); 1667 call_srcu(&connections_srcu, &con->rcu, connection_release); 1668 } 1669 1670 static void work_flush(void) 1671 { 1672 int ok; 1673 int i; 1674 struct connection *con; 1675 1676 do { 1677 ok = 1; 1678 foreach_conn(stop_conn); 1679 if (recv_workqueue) 1680 flush_workqueue(recv_workqueue); 1681 if (send_workqueue) 1682 flush_workqueue(send_workqueue); 1683 for (i = 0; i < CONN_HASH_SIZE && ok; i++) { 1684 hlist_for_each_entry_rcu(con, &connection_hash[i], 1685 list) { 1686 ok &= test_bit(CF_READ_PENDING, &con->flags); 1687 ok &= test_bit(CF_WRITE_PENDING, &con->flags); 1688 if (con->othercon) { 1689 ok &= test_bit(CF_READ_PENDING, 1690 &con->othercon->flags); 1691 ok &= test_bit(CF_WRITE_PENDING, 1692 &con->othercon->flags); 1693 } 1694 } 1695 } 1696 } while (!ok); 1697 } 1698 1699 void dlm_lowcomms_stop(void) 1700 { 1701 int idx; 1702 1703 idx = srcu_read_lock(&connections_srcu); 1704 work_flush(); 1705 foreach_conn(free_conn); 1706 srcu_read_unlock(&connections_srcu, idx); 1707 work_stop(); 1708 deinit_local(); 1709 1710 dlm_proto_ops = NULL; 1711 } 1712 1713 static int dlm_listen_for_all(void) 1714 { 1715 struct socket *sock; 1716 int result; 1717 1718 log_print("Using %s for communications", 1719 dlm_proto_ops->name); 1720 1721 result = dlm_proto_ops->listen_validate(); 1722 if (result < 0) 1723 return result; 1724 1725 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, 1726 SOCK_STREAM, dlm_proto_ops->proto, &sock); 1727 if (result < 0) { 1728 log_print("Can't create comms socket: %d", result); 1729 return result; 1730 } 1731 1732 sock_set_mark(sock->sk, dlm_config.ci_mark); 1733 dlm_proto_ops->listen_sockopts(sock); 1734 1735 result = dlm_proto_ops->listen_bind(sock); 1736 if (result < 0) 1737 goto out; 1738 1739 save_listen_callbacks(sock); 1740 add_listen_sock(sock, &listen_con); 1741 1742 result = sock->ops->listen(sock, 5); 1743 if (result < 0) { 1744 dlm_close_sock(&listen_con.sock); 1745 return result; 1746 } 1747 1748 return 0; 1749 1750 out: 1751 sock_release(sock); 1752 return result; 1753 } 1754 1755 static int dlm_tcp_bind(struct socket *sock) 1756 { 1757 struct sockaddr_storage src_addr; 1758 int result, addr_len; 1759 1760 /* Bind to our cluster-known address connecting to avoid 1761 * routing problems. 
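	 * A failure to bind here is logged but not treated as fatal; the
	 * subsequent connect may still succeed.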
	 */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);

	result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
				 addr_len);
	if (result < 0) {
		/* This *may* not indicate a critical error */
		log_print("could not bind for connect: %d", result);
	}

	return 0;
}

static int dlm_tcp_connect(struct connection *con, struct socket *sock,
			   struct sockaddr *addr, int addr_len)
{
	int ret;

	ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
	switch (ret) {
	case -EINPROGRESS:
		fallthrough;
	case 0:
		return 0;
	}

	return ret;
}

static int dlm_tcp_listen_validate(void)
{
	/* We don't support multi-homed hosts */
	if (dlm_local_count > 1) {
		log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
		return -EINVAL;
	}

	return 0;
}

static void dlm_tcp_sockopts(struct socket *sock)
{
	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);
}

static void dlm_tcp_listen_sockopts(struct socket *sock)
{
	dlm_tcp_sockopts(sock);
	sock_set_reuseaddr(sock->sk);
}

static int dlm_tcp_listen_bind(struct socket *sock)
{
	int addr_len;

	/* Bind to our port */
	make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
	return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
			       addr_len);
}

static const struct dlm_proto_ops dlm_tcp_ops = {
	.name = "TCP",
	.proto = IPPROTO_TCP,
	.connect = dlm_tcp_connect,
	.sockopts = dlm_tcp_sockopts,
	.bind = dlm_tcp_bind,
	.listen_validate = dlm_tcp_listen_validate,
	.listen_sockopts = dlm_tcp_listen_sockopts,
	.listen_bind = dlm_tcp_listen_bind,
};

static int dlm_sctp_bind(struct socket *sock)
{
	return sctp_bind_addrs(sock, 0);
}

static int dlm_sctp_connect(struct connection *con, struct socket *sock,
			    struct sockaddr *addr, int addr_len)
{
	int ret;

	/*
	 * Make sock->ops->connect() return within a bounded time: the
	 * O_NONBLOCK argument to connect() does not work here, so set a
	 * send timeout around the call and restore the default afterwards.
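	 * (Passing 0 to sock_set_sndtimeo() restores the default, effectively
	 * unlimited, send timeout.)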
1850 */ 1851 sock_set_sndtimeo(sock->sk, 5); 1852 ret = sock->ops->connect(sock, addr, addr_len, 0); 1853 sock_set_sndtimeo(sock->sk, 0); 1854 if (ret < 0) 1855 return ret; 1856 1857 if (!test_and_set_bit(CF_CONNECTED, &con->flags)) 1858 log_print("connected to node %d", con->nodeid); 1859 1860 return 0; 1861 } 1862 1863 static int dlm_sctp_listen_validate(void) 1864 { 1865 if (!IS_ENABLED(CONFIG_IP_SCTP)) { 1866 log_print("SCTP is not enabled by this kernel"); 1867 return -EOPNOTSUPP; 1868 } 1869 1870 request_module("sctp"); 1871 return 0; 1872 } 1873 1874 static int dlm_sctp_bind_listen(struct socket *sock) 1875 { 1876 return sctp_bind_addrs(sock, dlm_config.ci_tcp_port); 1877 } 1878 1879 static void dlm_sctp_sockopts(struct socket *sock) 1880 { 1881 /* Turn off Nagle's algorithm */ 1882 sctp_sock_set_nodelay(sock->sk); 1883 sock_set_rcvbuf(sock->sk, NEEDED_RMEM); 1884 } 1885 1886 static const struct dlm_proto_ops dlm_sctp_ops = { 1887 .name = "SCTP", 1888 .proto = IPPROTO_SCTP, 1889 .try_new_addr = true, 1890 .connect = dlm_sctp_connect, 1891 .sockopts = dlm_sctp_sockopts, 1892 .bind = dlm_sctp_bind, 1893 .listen_validate = dlm_sctp_listen_validate, 1894 .listen_sockopts = dlm_sctp_sockopts, 1895 .listen_bind = dlm_sctp_bind_listen, 1896 }; 1897 1898 int dlm_lowcomms_start(void) 1899 { 1900 int error = -EINVAL; 1901 1902 init_local(); 1903 if (!dlm_local_count) { 1904 error = -ENOTCONN; 1905 log_print("no local IP address has been set"); 1906 goto fail; 1907 } 1908 1909 error = work_start(); 1910 if (error) 1911 goto fail_local; 1912 1913 /* Start listening */ 1914 switch (dlm_config.ci_protocol) { 1915 case DLM_PROTO_TCP: 1916 dlm_proto_ops = &dlm_tcp_ops; 1917 break; 1918 case DLM_PROTO_SCTP: 1919 dlm_proto_ops = &dlm_sctp_ops; 1920 break; 1921 default: 1922 log_print("Invalid protocol identifier %d set", 1923 dlm_config.ci_protocol); 1924 error = -EINVAL; 1925 goto fail_proto_ops; 1926 } 1927 1928 error = dlm_listen_for_all(); 1929 if (error) 1930 goto fail_listen; 1931 1932 return 0; 1933 1934 fail_listen: 1935 dlm_proto_ops = NULL; 1936 fail_proto_ops: 1937 work_stop(); 1938 fail_local: 1939 deinit_local(); 1940 fail: 1941 return error; 1942 } 1943 1944 void dlm_lowcomms_init(void) 1945 { 1946 int i; 1947 1948 for (i = 0; i < CONN_HASH_SIZE; i++) 1949 INIT_HLIST_HEAD(&connection_hash[i]); 1950 1951 INIT_WORK(&listen_con.rwork, process_listen_recv_socket); 1952 } 1953 1954 void dlm_lowcomms_exit(void) 1955 { 1956 struct dlm_node_addr *na, *safe; 1957 1958 spin_lock(&dlm_node_addrs_spin); 1959 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) { 1960 list_del(&na->list); 1961 while (na->addr_count--) 1962 kfree(na->addr[na->addr_count]); 1963 kfree(na); 1964 } 1965 spin_unlock(&dlm_node_addrs_spin); 1966 } 1967