// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever it needs for inter-node
 * communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
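/*
 * Illustrative note (added commentary, not part of the original file):
 * the 'protocol' variable above is exposed through dlm's configfs tree,
 * so a cluster manager would typically select SCTP on every node with
 * something like:
 *
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol
 *
 * before any lockspace is created; the exact path depends on where
 * configfs is mounted.
 */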
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "memory.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

struct connection {
        struct socket *sock;    /* NULL if not connected */
        uint32_t nodeid;        /* So we know who we are in the list */
        struct mutex sock_mutex;
        unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
#define CF_CONNECTED 9
#define CF_RECONNECT 10
#define CF_DELAY_CONNECT 11
        struct list_head writequeue;  /* List of outgoing writequeue_entries */
        spinlock_t writequeue_lock;
        int retries;
#define MAX_CONNECT_RETRIES 3
        struct hlist_node list;
        struct connection *othercon;
        struct connection *sendcon;
        struct work_struct rwork; /* Receive workqueue */
        struct work_struct swork; /* Send workqueue */
        unsigned char *rx_buf;
        int rx_buflen;
        int rx_leftover;
        struct rcu_head rcu;
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

struct listen_connection {
        struct socket *sock;
        struct work_struct rwork;
};

#define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
#define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)

/* An entry waiting to be sent */
struct writequeue_entry {
        struct list_head list;
        struct page *page;
        int offset;
        int len;
        int end;
        int users;
        bool dirty;
        struct connection *con;
        struct list_head msgs;
        struct kref ref;
};

struct dlm_msg {
        struct writequeue_entry *entry;
        struct dlm_msg *orig_msg;
        bool retransmit;
        void *ppc;
        int len;
        int idx; /* new()/commit() idx exchange */

        struct list_head list;
        struct kref ref;
};

struct dlm_node_addr {
        struct list_head list;
        int nodeid;
        int mark;
        int addr_count;
        int curr_addr_index;
        struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

struct dlm_proto_ops {
        bool try_new_addr;
        const char *name;
        int proto;

        int (*connect)(struct connection *con, struct socket *sock,
                       struct sockaddr *addr, int addr_len);
        void (*sockopts)(struct socket *sock);
        int (*bind)(struct socket *sock);
        int (*listen_validate)(void);
        void (*listen_sockopts)(struct socket *sock);
        int (*listen_bind)(struct socket *sock);
};

static struct listen_sock_callbacks {
        void (*sk_error_report)(struct sock *);
        void (*sk_data_ready)(struct sock *);
        void (*sk_state_change)(struct sock *);
        void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct listen_connection listen_con;
static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
DEFINE_STATIC_SRCU(connections_srcu);

static const struct dlm_proto_ops *dlm_proto_ops;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);

bool dlm_lowcomms_is_running(void)
{
        return !!listen_con.sock;
}

static void writequeue_entry_ctor(void *data)
{
        struct writequeue_entry *entry = data;

        INIT_LIST_HEAD(&entry->msgs);
}

struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
{
        return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
                                 0, 0, writequeue_entry_ctor);
}

struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
{
        return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
}

/* must be called with writequeue_lock held */
static struct writequeue_entry *con_next_wq(struct connection *con)
{
        struct writequeue_entry *e;

        e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
                                     list);
        /* if len is zero nothing is to send, if there are users filling
         * buffers we wait until the users are done so we can send more.
         */
        if (!e || e->users || e->len == 0)
                return NULL;

        return e;
}

static struct connection *__find_con(int nodeid, int r)
{
        struct connection *con;

        hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
                if (con->nodeid == nodeid)
                        return con;
        }

        return NULL;
}

static int dlm_con_init(struct connection *con, int nodeid)
{
        con->rx_buflen = dlm_config.ci_buffer_size;
        con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
        if (!con->rx_buf)
                return -ENOMEM;

        con->nodeid = nodeid;
        mutex_init(&con->sock_mutex);
        INIT_LIST_HEAD(&con->writequeue);
        spin_lock_init(&con->writequeue_lock);
        INIT_WORK(&con->swork, process_send_sockets);
        INIT_WORK(&con->rwork, process_recv_sockets);

        return 0;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
        struct connection *con, *tmp;
        int r, ret;

        r = nodeid_hash(nodeid);
        con = __find_con(nodeid, r);
        if (con || !alloc)
                return con;

        con = kzalloc(sizeof(*con), alloc);
        if (!con)
                return NULL;

        ret = dlm_con_init(con, nodeid);
        if (ret) {
                kfree(con);
                return NULL;
        }

        spin_lock(&connections_lock);
        /* Because multiple workqueues/threads call this function it can
         * race on multiple cpus. Instead of locking the hot path
         * __find_con() we just check, in the rare case of recently added
         * nodes, again under protection of connections_lock. If this is
         * the case we abort our connection creation and return the
         * existing connection.
         */
        tmp = __find_con(nodeid, r);
        if (tmp) {
                spin_unlock(&connections_lock);
                kfree(con->rx_buf);
                kfree(con);
                return tmp;
        }

        hlist_add_head_rcu(&con->list, &connection_hash[r]);
        spin_unlock(&connections_lock);

        return con;
}
/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
        int i;
        struct connection *con;

        for (i = 0; i < CONN_HASH_SIZE; i++) {
                hlist_for_each_entry_rcu(con, &connection_hash[i], list)
                        conn_func(con);
        }
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
        struct dlm_node_addr *na;

        list_for_each_entry(na, &dlm_node_addrs, list) {
                if (na->nodeid == nodeid)
                        return na;
        }
        return NULL;
}

static int addr_compare(const struct sockaddr_storage *x,
                        const struct sockaddr_storage *y)
{
        switch (x->ss_family) {
        case AF_INET: {
                struct sockaddr_in *sinx = (struct sockaddr_in *)x;
                struct sockaddr_in *siny = (struct sockaddr_in *)y;
                if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
                        return 0;
                if (sinx->sin_port != siny->sin_port)
                        return 0;
                break;
        }
        case AF_INET6: {
                struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
                struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
                if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
                        return 0;
                if (sinx->sin6_port != siny->sin6_port)
                        return 0;
                break;
        }
        default:
                return 0;
        }
        return 1;
}

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
                          struct sockaddr *sa_out, bool try_new_addr,
                          unsigned int *mark)
{
        struct sockaddr_storage sas;
        struct dlm_node_addr *na;

        if (!dlm_local_count)
                return -1;

        spin_lock(&dlm_node_addrs_spin);
        na = find_node_addr(nodeid);
        if (na && na->addr_count) {
                memcpy(&sas, na->addr[na->curr_addr_index],
                       sizeof(struct sockaddr_storage));

                if (try_new_addr) {
                        na->curr_addr_index++;
                        if (na->curr_addr_index == na->addr_count)
                                na->curr_addr_index = 0;
                }
        }
        spin_unlock(&dlm_node_addrs_spin);

        if (!na)
                return -EEXIST;

        if (!na->addr_count)
                return -ENOENT;

        *mark = na->mark;

        if (sas_out)
                memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

        if (!sa_out)
                return 0;

        if (dlm_local_addr[0]->ss_family == AF_INET) {
                struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
                struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
                ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
        } else {
                struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
                ret6->sin6_addr = in6->sin6_addr;
        }

        return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
                          unsigned int *mark)
{
        struct dlm_node_addr *na;
        int rv = -EEXIST;
        int addr_i;

        spin_lock(&dlm_node_addrs_spin);
        list_for_each_entry(na, &dlm_node_addrs, list) {
                if (!na->addr_count)
                        continue;

                for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
                        if (addr_compare(na->addr[addr_i], addr)) {
                                *nodeid = na->nodeid;
                                *mark = na->mark;
                                rv = 0;
                                goto unlock;
                        }
                }
        }
unlock:
        spin_unlock(&dlm_node_addrs_spin);
        return rv;
}
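/*
 * Summary note (added commentary, not part of the original file): the
 * dlm_node_addrs list is the node address table. It is filled at
 * cluster-join time through dlm_lowcomms_addr(), consumed in the connect
 * path by nodeid_to_addr() (which round-robins a node's addresses when
 * the transport sets try_new_addr, i.e. SCTP), and consulted in reverse
 * by accept_from_sock() via addr_to_nodeid() to map an incoming peer
 * address back to a nodeid. Entries are torn down in dlm_lowcomms_close()
 * and dlm_lowcomms_exit().
 */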
/* caller must hold dlm_node_addrs_spin */
static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
                                     const struct sockaddr_storage *addr)
{
        int i;

        for (i = 0; i < na->addr_count; i++) {
                if (addr_compare(na->addr[i], addr))
                        return true;
        }

        return false;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
        struct sockaddr_storage *new_addr;
        struct dlm_node_addr *new_node, *na;
        bool ret;

        new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
        if (!new_node)
                return -ENOMEM;

        new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
        if (!new_addr) {
                kfree(new_node);
                return -ENOMEM;
        }

        memcpy(new_addr, addr, len);

        spin_lock(&dlm_node_addrs_spin);
        na = find_node_addr(nodeid);
        if (!na) {
                new_node->nodeid = nodeid;
                new_node->addr[0] = new_addr;
                new_node->addr_count = 1;
                new_node->mark = dlm_config.ci_mark;
                list_add(&new_node->list, &dlm_node_addrs);
                spin_unlock(&dlm_node_addrs_spin);
                return 0;
        }

        ret = dlm_lowcomms_na_has_addr(na, addr);
        if (ret) {
                spin_unlock(&dlm_node_addrs_spin);
                kfree(new_addr);
                kfree(new_node);
                return -EEXIST;
        }

        if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
                spin_unlock(&dlm_node_addrs_spin);
                kfree(new_addr);
                kfree(new_node);
                return -ENOSPC;
        }

        na->addr[na->addr_count++] = new_addr;
        spin_unlock(&dlm_node_addrs_spin);
        kfree(new_node);
        return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
        struct connection *con;

        con = sock2con(sk);
        if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_listen_data_ready(struct sock *sk)
{
        queue_work(recv_workqueue, &listen_con.rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
        struct connection *con;

        con = sock2con(sk);
        if (!con)
                return;

        if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
                log_print("connected to node %d", con->nodeid);
                queue_work(send_workqueue, &con->swork);
                return;
        }

        clear_bit(SOCK_NOSPACE, &con->sock->flags);

        if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
                con->sock->sk->sk_write_pending--;
                clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
        }

        queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
        if (test_bit(CF_CLOSE, &con->flags))
                return;
        queue_work(send_workqueue, &con->swork);
        cond_resched();
}
static void lowcomms_state_change(struct sock *sk)
{
        /* SCTP layer is not calling sk_data_ready when the connection
         * is done, so we catch the signal through here. Also, it
         * doesn't switch socket state when entering shutdown, so we
         * skip the write in that case.
         */
        if (sk->sk_shutdown) {
                if (sk->sk_shutdown == RCV_SHUTDOWN)
                        lowcomms_data_ready(sk);
        } else if (sk->sk_state == TCP_ESTABLISHED) {
                lowcomms_write_space(sk);
        }
}

int dlm_lowcomms_connect_node(int nodeid)
{
        struct connection *con;
        int idx;

        if (nodeid == dlm_our_nodeid())
                return 0;

        idx = srcu_read_lock(&connections_srcu);
        con = nodeid2con(nodeid, GFP_NOFS);
        if (!con) {
                srcu_read_unlock(&connections_srcu, idx);
                return -ENOMEM;
        }

        lowcomms_connect_sock(con);
        srcu_read_unlock(&connections_srcu, idx);

        return 0;
}

int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
{
        struct dlm_node_addr *na;

        spin_lock(&dlm_node_addrs_spin);
        na = find_node_addr(nodeid);
        if (!na) {
                spin_unlock(&dlm_node_addrs_spin);
                return -ENOENT;
        }

        na->mark = mark;
        spin_unlock(&dlm_node_addrs_spin);

        return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
        struct connection *con;
        void (*orig_report)(struct sock *) = NULL;
        struct inet_sock *inet;

        con = sock2con(sk);
        if (con == NULL)
                goto out;

        orig_report = listen_sock.sk_error_report;

        inet = inet_sk(sk);
        switch (sk->sk_family) {
        case AF_INET:
                printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
                                   "sending to node %d at %pI4, dport %d, "
                                   "sk_err=%d/%d\n", dlm_our_nodeid(),
                                   con->nodeid, &inet->inet_daddr,
                                   ntohs(inet->inet_dport), sk->sk_err,
                                   sk->sk_err_soft);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
                                   "sending to node %d at %pI6c, "
                                   "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
                                   con->nodeid, &sk->sk_v6_daddr,
                                   ntohs(inet->inet_dport), sk->sk_err,
                                   sk->sk_err_soft);
                break;
#endif
        default:
                printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
                                   "invalid socket family %d set, "
                                   "sk_err=%d/%d\n", dlm_our_nodeid(),
                                   sk->sk_family, sk->sk_err, sk->sk_err_soft);
                goto out;
        }

        /* from here on we handle the sendcon only */
        if (test_bit(CF_IS_OTHERCON, &con->flags))
                con = con->sendcon;

        switch (sk->sk_err) {
        case ECONNREFUSED:
                set_bit(CF_DELAY_CONNECT, &con->flags);
                break;
        default:
                break;
        }

        if (!test_and_set_bit(CF_RECONNECT, &con->flags))
                queue_work(send_workqueue, &con->swork);

out:
        if (orig_report)
                orig_report(sk);
}

static void restore_callbacks(struct socket *sock)
{
        struct sock *sk = sock->sk;

        lock_sock(sk);
        sk->sk_user_data = NULL;
        sk->sk_data_ready = listen_sock.sk_data_ready;
        sk->sk_state_change = listen_sock.sk_state_change;
        sk->sk_write_space = listen_sock.sk_write_space;
        sk->sk_error_report = listen_sock.sk_error_report;
        release_sock(sk);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
        struct sock *sk = sock->sk;

        lock_sock(sk);
        con->sock = sock;

        sk->sk_user_data = con;
        /* Install a data_ready callback */
        sk->sk_data_ready = lowcomms_data_ready;
        sk->sk_write_space = lowcomms_write_space;
        sk->sk_state_change = lowcomms_state_change;
        sk->sk_allocation = GFP_NOFS;
        sk->sk_error_report = lowcomms_error_report;
        release_sock(sk);
}
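/*
 * Added commentary (not from the original file): socket callbacks follow
 * a save/override/restore pattern. The kernel's defaults are captured
 * once in 'listen_sock' when the listening socket is set up in
 * dlm_listen_for_all(); add_sock() then points a connected socket's sk_*
 * callbacks at the lowcomms_* handlers above, and restore_callbacks()
 * puts the saved defaults back before the socket is released in
 * dlm_close_sock(), so no DLM callback can fire on a socket that is
 * going away.
 */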
/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
                          int *addr_len)
{
        saddr->ss_family = dlm_local_addr[0]->ss_family;
        if (saddr->ss_family == AF_INET) {
                struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
                in4_addr->sin_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in);
                memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
        } else {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
                in6_addr->sin6_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in6);
        }
        memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}

static void dlm_page_release(struct kref *kref)
{
        struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
                                                  ref);

        __free_page(e->page);
        dlm_free_writequeue(e);
}

static void dlm_msg_release(struct kref *kref)
{
        struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);

        kref_put(&msg->entry->ref, dlm_page_release);
        dlm_free_msg(msg);
}

static void free_entry(struct writequeue_entry *e)
{
        struct dlm_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
                if (msg->orig_msg) {
                        msg->orig_msg->retransmit = false;
                        kref_put(&msg->orig_msg->ref, dlm_msg_release);
                }

                list_del(&msg->list);
                kref_put(&msg->ref, dlm_msg_release);
        }

        list_del(&e->list);
        kref_put(&e->ref, dlm_page_release);
}

static void dlm_close_sock(struct socket **sock)
{
        if (*sock) {
                restore_callbacks(*sock);
                sock_release(*sock);
                *sock = NULL;
        }
}
/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
                             bool tx, bool rx)
{
        bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
        struct writequeue_entry *e;

        if (tx && !closing && cancel_work_sync(&con->swork)) {
                log_print("canceled swork for node %d", con->nodeid);
                clear_bit(CF_WRITE_PENDING, &con->flags);
        }
        if (rx && !closing && cancel_work_sync(&con->rwork)) {
                log_print("canceled rwork for node %d", con->nodeid);
                clear_bit(CF_READ_PENDING, &con->flags);
        }

        mutex_lock(&con->sock_mutex);
        dlm_close_sock(&con->sock);

        if (con->othercon && and_other) {
                /* Will only re-enter once. */
                close_connection(con->othercon, false, tx, rx);
        }

        /* if a writequeue entry was only sent half way, we drop the
         * whole entry on reconnection so that we never restart in the
         * middle of a msg, which would confuse the other end.
         *
         * we can always drop messages because of retransmits, but what we
         * cannot allow is to transmit half messages which may be processed
         * at the other side.
         *
         * our policy is to start from a clean state on disconnect; we don't
         * know what was sent/received on the transport layer in this case.
         */
        spin_lock(&con->writequeue_lock);
        if (!list_empty(&con->writequeue)) {
                e = list_first_entry(&con->writequeue, struct writequeue_entry,
                                     list);
                if (e->dirty)
                        free_entry(e);
        }
        spin_unlock(&con->writequeue_lock);

        con->rx_leftover = 0;
        con->retries = 0;
        clear_bit(CF_APP_LIMITED, &con->flags);
        clear_bit(CF_CONNECTED, &con->flags);
        clear_bit(CF_DELAY_CONNECT, &con->flags);
        clear_bit(CF_RECONNECT, &con->flags);
        mutex_unlock(&con->sock_mutex);
        clear_bit(CF_CLOSING, &con->flags);
}

static int con_realloc_receive_buf(struct connection *con, int newlen)
{
        unsigned char *newbuf;

        newbuf = kmalloc(newlen, GFP_NOFS);
        if (!newbuf)
                return -ENOMEM;

        /* copy any leftover from last receive */
        if (con->rx_leftover)
                memmove(newbuf, con->rx_buf, con->rx_leftover);

        /* swap to new buffer space */
        kfree(con->rx_buf);
        con->rx_buflen = newlen;
        con->rx_buf = newbuf;

        return 0;
}
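/*
 * Worked example (added commentary; the figures are hypothetical): with
 * a 4096-byte rx_buf, suppose kernel_recvmsg() returns 3000 bytes and
 * dlm_process_incoming_buffer() consumes 2800 of them, the remaining 200
 * bytes being the head of a message that has not fully arrived.
 * receive_from_sock() then sets rx_leftover = 200 and memmoves those
 * bytes to the start of rx_buf, so the next receive appends at offset
 * 200 and the partial message stays contiguous.
 */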
/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
        struct msghdr msg;
        struct kvec iov;
        int ret, buflen;

        mutex_lock(&con->sock_mutex);

        if (con->sock == NULL) {
                ret = -EAGAIN;
                goto out_close;
        }

        /* realloc if we get new buffer size to read out */
        buflen = dlm_config.ci_buffer_size;
        if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
                ret = con_realloc_receive_buf(con, buflen);
                if (ret < 0)
                        goto out_resched;
        }

        for (;;) {
                /* calculate new buffer parameter regarding last receive and
                 * possible leftover bytes
                 */
                iov.iov_base = con->rx_buf + con->rx_leftover;
                iov.iov_len = con->rx_buflen - con->rx_leftover;

                memset(&msg, 0, sizeof(msg));
                msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
                ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
                                     msg.msg_flags);
                trace_dlm_recv(con->nodeid, ret);
                if (ret == -EAGAIN)
                        break;
                else if (ret <= 0)
                        goto out_close;

                /* new buflen according to received bytes and leftover from
                 * last receive
                 */
                buflen = ret + con->rx_leftover;
                ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
                if (ret < 0)
                        goto out_close;

                /* calculate leftover bytes from processing and put them at
                 * the beginning of the receive buffer, so on the next receive
                 * we have the full message at the start address of the
                 * receive buffer.
                 */
                con->rx_leftover = buflen - ret;
                if (con->rx_leftover) {
                        memmove(con->rx_buf, con->rx_buf + ret,
                                con->rx_leftover);
                }
        }

        dlm_midcomms_receive_done(con->nodeid);
        mutex_unlock(&con->sock_mutex);
        return 0;

out_resched:
        if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
        mutex_unlock(&con->sock_mutex);
        return -EAGAIN;

out_close:
        if (ret == 0) {
                log_print("connection %p got EOF from %d",
                          con, con->nodeid);

                mutex_unlock(&con->sock_mutex);
                close_connection(con, false, true, false);
                /* signal the receive worker to break out of its loop */
                ret = -1;
        } else {
                mutex_unlock(&con->sock_mutex);
        }
        return ret;
}
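/*
 * Added commentary (not from the original file): the 'othercon' handling
 * in accept_from_sock() below covers the case where two nodes dial each
 * other at the same time:
 *
 *	node A: connect() to B ---\
 *	node B: connect() to A ----\-- both connections cross on the wire
 *
 * Each side then keeps its own outgoing socket in con->sock and parks
 * the accepted incoming socket in con->othercon, which is used for
 * receiving only; its sendcon field points back at the main connection.
 */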
/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct listen_connection *con)
{
        int result;
        struct sockaddr_storage peeraddr;
        struct socket *newsock;
        int len, idx;
        int nodeid;
        struct connection *newcon;
        struct connection *addcon;
        unsigned int mark;

        if (!con->sock)
                return -ENOTCONN;

        result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
        if (result < 0)
                goto accept_err;

        /* Get the connected socket's peer */
        memset(&peeraddr, 0, sizeof(peeraddr));
        len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
        if (len < 0) {
                result = -ECONNABORTED;
                goto accept_err;
        }

        /* Get the new node's NODEID */
        make_sockaddr(&peeraddr, 0, &len);
        if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
                switch (peeraddr.ss_family) {
                case AF_INET: {
                        struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr;

                        log_print("connect from non cluster IPv4 node %pI4",
                                  &sin->sin_addr);
                        break;
                }
#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6: {
                        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr;

                        log_print("connect from non cluster IPv6 node %pI6c",
                                  &sin6->sin6_addr);
                        break;
                }
#endif
                default:
                        log_print("invalid family from non cluster node");
                        break;
                }

                sock_release(newsock);
                return -1;
        }

        log_print("got connection from %d", nodeid);

        /* Check to see if we already have a connection to this node. This
         * could happen if the two nodes initiate a connection at roughly
         * the same time and the connections cross on the wire.
         * In this case we store the incoming one in "othercon"
         */
        idx = srcu_read_lock(&connections_srcu);
        newcon = nodeid2con(nodeid, GFP_NOFS);
        if (!newcon) {
                srcu_read_unlock(&connections_srcu, idx);
                result = -ENOMEM;
                goto accept_err;
        }

        sock_set_mark(newsock->sk, mark);

        mutex_lock(&newcon->sock_mutex);
        if (newcon->sock) {
                struct connection *othercon = newcon->othercon;

                if (!othercon) {
                        othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
                        if (!othercon) {
                                log_print("failed to allocate incoming socket");
                                mutex_unlock(&newcon->sock_mutex);
                                srcu_read_unlock(&connections_srcu, idx);
                                result = -ENOMEM;
                                goto accept_err;
                        }

                        result = dlm_con_init(othercon, nodeid);
                        if (result < 0) {
                                kfree(othercon);
                                mutex_unlock(&newcon->sock_mutex);
                                srcu_read_unlock(&connections_srcu, idx);
                                goto accept_err;
                        }

                        lockdep_set_subclass(&othercon->sock_mutex, 1);
                        set_bit(CF_IS_OTHERCON, &othercon->flags);
                        newcon->othercon = othercon;
                        othercon->sendcon = newcon;
                } else {
                        /* close other sock con if we have something new */
                        close_connection(othercon, false, true, false);
                }

                mutex_lock(&othercon->sock_mutex);
                add_sock(newsock, othercon);
                addcon = othercon;
                mutex_unlock(&othercon->sock_mutex);
        } else {
                /* accept copies the sk after we've saved the callbacks, so we
                   don't want to save them a second time or comm errors will
                   result in calling sk_error_report recursively. */
                add_sock(newsock, newcon);
                addcon = newcon;
        }

        set_bit(CF_CONNECTED, &addcon->flags);
        mutex_unlock(&newcon->sock_mutex);

        /*
         * Add it to the active queue in case we got data
         * between processing the accept and adding the socket
         * to the read_sockets list
         */
        if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
                queue_work(recv_workqueue, &addcon->rwork);

        srcu_read_unlock(&connections_srcu, idx);

        return 0;

accept_err:
        if (newsock)
                sock_release(newsock);

        if (result != -EAGAIN)
                log_print("error accepting connection from node: %d", result);
        return result;
}
/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
        e->offset += completed;
        e->len -= completed;
        /* signal that the page was partially transmitted */
        e->dirty = true;

        if (e->len == 0 && e->users == 0)
                free_entry(e);
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct socket *sock, uint16_t port)
{
        struct sockaddr_storage localaddr;
        struct sockaddr *addr = (struct sockaddr *)&localaddr;
        int i, addr_len, result = 0;

        for (i = 0; i < dlm_local_count; i++) {
                memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
                make_sockaddr(&localaddr, port, &addr_len);

                if (!i)
                        result = kernel_bind(sock, addr, addr_len);
                else
                        result = sock_bind_add(sock->sk, addr, addr_len);

                if (result < 0) {
                        log_print("Can't bind to %d addr number %d, %d.\n",
                                  port, i + 1, result);
                        break;
                }
        }
        return result;
}

/* Get local addresses */
static void init_local(void)
{
        struct sockaddr_storage sas, *addr;
        int i;

        dlm_local_count = 0;
        for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
                if (dlm_our_addr(&sas, i))
                        break;

                addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
                if (!addr)
                        break;
                dlm_local_addr[dlm_local_count++] = addr;
        }
}

static void deinit_local(void)
{
        int i;

        for (i = 0; i < dlm_local_count; i++)
                kfree(dlm_local_addr[i]);
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con)
{
        struct writequeue_entry *entry;

        entry = dlm_allocate_writequeue();
        if (!entry)
                return NULL;

        entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
        if (!entry->page) {
                dlm_free_writequeue(entry);
                return NULL;
        }

        entry->offset = 0;
        entry->len = 0;
        entry->end = 0;
        entry->dirty = false;
        entry->con = con;
        entry->users = 1;
        kref_init(&entry->ref);
        return entry;
}

static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
                                             char **ppc, void (*cb)(void *data),
                                             void *data)
{
        struct writequeue_entry *e;

        spin_lock(&con->writequeue_lock);
        if (!list_empty(&con->writequeue)) {
                e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
                if (DLM_WQ_REMAIN_BYTES(e) >= len) {
                        kref_get(&e->ref);

                        *ppc = page_address(e->page) + e->end;
                        if (cb)
                                cb(data);

                        e->end += len;
                        e->users++;
                        goto out;
                }
        }

        e = new_writequeue_entry(con);
        if (!e)
                goto out;

        kref_get(&e->ref);
        *ppc = page_address(e->page);
        e->end += len;
        if (cb)
                cb(data);

        list_add_tail(&e->list, &con->writequeue);

out:
        spin_unlock(&con->writequeue_lock);
        return e;
}

static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
                                                gfp_t allocation, char **ppc,
                                                void (*cb)(void *data),
                                                void *data)
{
        struct writequeue_entry *e;
        struct dlm_msg *msg;

        msg = dlm_allocate_msg(allocation);
        if (!msg)
                return NULL;

        kref_init(&msg->ref);

        e = new_wq_entry(con, len, ppc, cb, data);
        if (!e) {
                dlm_free_msg(msg);
                return NULL;
        }

        msg->retransmit = false;
        msg->orig_msg = NULL;
        msg->ppc = *ppc;
        msg->len = len;
        msg->entry = e;

        return msg;
}
/* avoid a false positive for connections_srcu: the unlock happens in
 * dlm_lowcomms_commit_msg(), which must be called on success
 */
#ifndef __CHECKER__
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
                                     char **ppc, void (*cb)(void *data),
                                     void *data)
{
        struct connection *con;
        struct dlm_msg *msg;
        int idx;

        if (len > DLM_MAX_SOCKET_BUFSIZE ||
            len < sizeof(struct dlm_header)) {
                BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
                log_print("failed to allocate a buffer of size %d", len);
                WARN_ON(1);
                return NULL;
        }

        idx = srcu_read_lock(&connections_srcu);
        con = nodeid2con(nodeid, allocation);
        if (!con) {
                srcu_read_unlock(&connections_srcu, idx);
                return NULL;
        }

        msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
        if (!msg) {
                srcu_read_unlock(&connections_srcu, idx);
                return NULL;
        }

        /* for dlm_lowcomms_commit_msg() */
        kref_get(&msg->ref);
        /* we assume that on success commit must be called */
        msg->idx = idx;
        return msg;
}
#endif

static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
        struct writequeue_entry *e = msg->entry;
        struct connection *con = e->con;
        int users;

        spin_lock(&con->writequeue_lock);
        kref_get(&msg->ref);
        list_add(&msg->list, &e->msgs);

        users = --e->users;
        if (users)
                goto out;

        e->len = DLM_WQ_LENGTH_BYTES(e);
        spin_unlock(&con->writequeue_lock);

        queue_work(send_workqueue, &con->swork);
        return;

out:
        spin_unlock(&con->writequeue_lock);
}

/* avoid a false positive for connections_srcu: the lock was taken in
 * dlm_lowcomms_new_msg()
 */
#ifndef __CHECKER__
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
        _dlm_lowcomms_commit_msg(msg);
        srcu_read_unlock(&connections_srcu, msg->idx);
        /* because dlm_lowcomms_new_msg() */
        kref_put(&msg->ref, dlm_msg_release);
}
#endif

void dlm_lowcomms_put_msg(struct dlm_msg *msg)
{
        kref_put(&msg->ref, dlm_msg_release);
}

/* does not hold connections_srcu, for use from workqueue context only */
int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
{
        struct dlm_msg *msg_resend;
        char *ppc;

        if (msg->retransmit)
                return 1;

        msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
                                              GFP_ATOMIC, &ppc, NULL, NULL);
        if (!msg_resend)
                return -ENOMEM;

        msg->retransmit = true;
        kref_get(&msg->ref);
        msg_resend->orig_msg = msg;

        memcpy(ppc, msg->ppc, msg->len);
        _dlm_lowcomms_commit_msg(msg_resend);
        dlm_lowcomms_put_msg(msg_resend);

        return 0;
}
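/*
 * Usage sketch (added commentary; illustrative only, not part of this
 * file): a caller such as midcomms drives the message API above roughly
 * like this, assuming 'nodeid' is valid and 'len' covers a prepared
 * dlm_header:
 *
 *	char *ppc;
 *	struct dlm_msg *msg;
 *
 *	msg = dlm_lowcomms_new_msg(nodeid, len, GFP_NOFS, &ppc, NULL, NULL);
 *	if (!msg)
 *		return -ENOMEM;
 *	memcpy(ppc, buf, len);		// fill the reserved writequeue space
 *	dlm_lowcomms_commit_msg(msg);	// queue it, drop the SRCU read lock
 *	dlm_lowcomms_put_msg(msg);	// drop the caller's reference
 *
 * new_msg() and commit() form a bracket: the SRCU read lock and the
 * extra kref taken in new_msg() are both released in commit(), which is
 * why commit must always follow a successful new_msg().
 */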
/* Send a message */
static void send_to_sock(struct connection *con)
{
        const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        struct writequeue_entry *e;
        int len, offset, ret;
        int count;

again:
        count = 0;

        mutex_lock(&con->sock_mutex);
        if (con->sock == NULL)
                goto out_connect;

        spin_lock(&con->writequeue_lock);
        for (;;) {
                e = con_next_wq(con);
                if (!e)
                        break;

                len = e->len;
                offset = e->offset;
                BUG_ON(len == 0 && e->users == 0);
                spin_unlock(&con->writequeue_lock);

                ret = kernel_sendpage(con->sock, e->page, offset, len,
                                      msg_flags);
                trace_dlm_send(con->nodeid, ret);
                if (ret == -EAGAIN || ret == 0) {
                        if (ret == -EAGAIN &&
                            test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
                            !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
                                /* Notify TCP that we're limited by the
                                 * application window size.
                                 */
                                set_bit(SOCK_NOSPACE, &con->sock->flags);
                                con->sock->sk->sk_write_pending++;
                        }
                        cond_resched();
                        goto out;
                } else if (ret < 0)
                        goto out;

                spin_lock(&con->writequeue_lock);
                writequeue_entry_complete(e, ret);

                /* Don't starve people filling buffers */
                if (++count >= MAX_SEND_MSG_COUNT) {
                        spin_unlock(&con->writequeue_lock);
                        mutex_unlock(&con->sock_mutex);
                        cond_resched();
                        goto again;
                }
        }
        spin_unlock(&con->writequeue_lock);

out:
        mutex_unlock(&con->sock_mutex);
        return;

out_connect:
        mutex_unlock(&con->sock_mutex);
        queue_work(send_workqueue, &con->swork);
        cond_resched();
}
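/*
 * Added commentary (not from the original file): send_to_sock() applies
 * backpressure in two ways. When kernel_sendpage() returns -EAGAIN with
 * SOCKWQ_ASYNC_NOSPACE set, CF_APP_LIMITED is recorded and
 * sk_write_pending is bumped; lowcomms_write_space() reverses both and
 * requeues the send work once the socket has room again. Independently,
 * after MAX_SEND_MSG_COUNT (25) successful sends the worker drops its
 * locks and cond_resched()s so writers filling buffers are not starved.
 */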
static void clean_one_writequeue(struct connection *con)
{
        struct writequeue_entry *e, *safe;

        spin_lock(&con->writequeue_lock);
        list_for_each_entry_safe(e, safe, &con->writequeue, list) {
                free_entry(e);
        }
        spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
        struct connection *con;
        struct dlm_node_addr *na;
        int idx;

        log_print("closing connection to node %d", nodeid);
        idx = srcu_read_lock(&connections_srcu);
        con = nodeid2con(nodeid, 0);
        if (con) {
                set_bit(CF_CLOSE, &con->flags);
                close_connection(con, true, true, true);
                clean_one_writequeue(con);
                if (con->othercon)
                        clean_one_writequeue(con->othercon);
        }
        srcu_read_unlock(&connections_srcu, idx);

        spin_lock(&dlm_node_addrs_spin);
        na = find_node_addr(nodeid);
        if (na) {
                list_del(&na->list);
                while (na->addr_count--)
                        kfree(na->addr[na->addr_count]);
                kfree(na);
        }
        spin_unlock(&dlm_node_addrs_spin);

        return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, rwork);

        clear_bit(CF_READ_PENDING, &con->flags);
        receive_from_sock(con);
}

static void process_listen_recv_socket(struct work_struct *work)
{
        int ret;

        do {
                ret = accept_from_sock(&listen_con);
        } while (!ret);
}

static void dlm_connect(struct connection *con)
{
        struct sockaddr_storage addr;
        int result, addr_len;
        struct socket *sock;
        unsigned int mark;

        /* Some odd races can cause double-connects, ignore them */
        if (con->retries++ > MAX_CONNECT_RETRIES)
                return;

        if (con->sock) {
                log_print("node %d already connected.", con->nodeid);
                return;
        }

        memset(&addr, 0, sizeof(addr));
        result = nodeid_to_addr(con->nodeid, &addr, NULL,
                                dlm_proto_ops->try_new_addr, &mark);
        if (result < 0) {
                log_print("no address for nodeid %d", con->nodeid);
                return;
        }

        /* Create a socket to communicate with */
        result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
                                  SOCK_STREAM, dlm_proto_ops->proto, &sock);
        if (result < 0)
                goto socket_err;

        sock_set_mark(sock->sk, mark);
        dlm_proto_ops->sockopts(sock);

        add_sock(sock, con);

        result = dlm_proto_ops->bind(sock);
        if (result < 0)
                goto add_sock_err;

        log_print_ratelimited("connecting to %d", con->nodeid);
        make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
        result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
                                        addr_len);
        if (result < 0)
                goto add_sock_err;

        return;

add_sock_err:
        dlm_close_sock(&con->sock);

socket_err:
        /*
         * Some errors are fatal and this list might need adjusting. For other
         * errors we try again until the max number of retries is reached.
         */
        if (result != -EHOSTUNREACH &&
            result != -ENETUNREACH &&
            result != -ENETDOWN &&
            result != -EINVAL &&
            result != -EPROTONOSUPPORT) {
                log_print("connect %d try %d error %d", con->nodeid,
                          con->retries, result);
                msleep(1000);
                lowcomms_connect_sock(con);
        }
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, swork);

        WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));

        clear_bit(CF_WRITE_PENDING, &con->flags);

        if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
                close_connection(con, false, false, true);
                dlm_midcomms_unack_msg_resend(con->nodeid);
        }

        if (con->sock == NULL) {
                if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
                        msleep(1000);

                mutex_lock(&con->sock_mutex);
                dlm_connect(con);
                mutex_unlock(&con->sock_mutex);
        }

        if (!list_empty(&con->writequeue))
                send_to_sock(con);
}

static void work_stop(void)
{
        if (recv_workqueue) {
                destroy_workqueue(recv_workqueue);
                recv_workqueue = NULL;
        }

        if (send_workqueue) {
                destroy_workqueue(send_workqueue);
                send_workqueue = NULL;
        }
}

static int work_start(void)
{
        recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
        if (!recv_workqueue) {
                log_print("can't start dlm_recv");
                return -ENOMEM;
        }

        send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
        if (!send_workqueue) {
                log_print("can't start dlm_send");
                destroy_workqueue(recv_workqueue);
                recv_workqueue = NULL;
                return -ENOMEM;
        }

        return 0;
}

void dlm_lowcomms_shutdown(void)
{
        /* stop lowcomms_listen_data_ready calls */
        lock_sock(listen_con.sock->sk);
        listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready;
        release_sock(listen_con.sock->sk);

        cancel_work_sync(&listen_con.rwork);
        dlm_close_sock(&listen_con.sock);
}

void dlm_lowcomms_shutdown_node(int nodeid, bool force)
{
        struct connection *con;
        int idx;

        idx = srcu_read_lock(&connections_srcu);
        con = nodeid2con(nodeid, 0);
        if (WARN_ON_ONCE(!con)) {
                srcu_read_unlock(&connections_srcu, idx);
                return;
        }

        flush_work(&con->swork);
        WARN_ON_ONCE(!force && !list_empty(&con->writequeue));
        clean_one_writequeue(con);
        if (con->othercon)
                clean_one_writequeue(con->othercon);
        close_connection(con, true, true, true);
        srcu_read_unlock(&connections_srcu, idx);
}
static void _stop_conn(struct connection *con, bool and_other)
{
        mutex_lock(&con->sock_mutex);
        set_bit(CF_CLOSE, &con->flags);
        set_bit(CF_READ_PENDING, &con->flags);
        set_bit(CF_WRITE_PENDING, &con->flags);
        if (con->sock && con->sock->sk) {
                lock_sock(con->sock->sk);
                con->sock->sk->sk_user_data = NULL;
                release_sock(con->sock->sk);
        }
        if (con->othercon && and_other)
                _stop_conn(con->othercon, false);
        mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
        _stop_conn(con, true);
}

static void connection_release(struct rcu_head *rcu)
{
        struct connection *con = container_of(rcu, struct connection, rcu);

        kfree(con->rx_buf);
        kfree(con);
}

static void free_conn(struct connection *con)
{
        close_connection(con, true, true, true);
        spin_lock(&connections_lock);
        hlist_del_rcu(&con->list);
        spin_unlock(&connections_lock);
        if (con->othercon) {
                clean_one_writequeue(con->othercon);
                call_srcu(&connections_srcu, &con->othercon->rcu,
                          connection_release);
        }
        clean_one_writequeue(con);
        call_srcu(&connections_srcu, &con->rcu, connection_release);
}

static void work_flush(void)
{
        int ok;
        int i;
        struct connection *con;

        do {
                ok = 1;
                foreach_conn(stop_conn);
                if (recv_workqueue)
                        flush_workqueue(recv_workqueue);
                if (send_workqueue)
                        flush_workqueue(send_workqueue);
                for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
                        hlist_for_each_entry_rcu(con, &connection_hash[i],
                                                 list) {
                                ok &= test_bit(CF_READ_PENDING, &con->flags);
                                ok &= test_bit(CF_WRITE_PENDING, &con->flags);
                                if (con->othercon) {
                                        ok &= test_bit(CF_READ_PENDING,
                                                       &con->othercon->flags);
                                        ok &= test_bit(CF_WRITE_PENDING,
                                                       &con->othercon->flags);
                                }
                        }
                }
        } while (!ok);
}

void dlm_lowcomms_stop(void)
{
        int idx;

        idx = srcu_read_lock(&connections_srcu);
        work_flush();
        foreach_conn(free_conn);
        srcu_read_unlock(&connections_srcu, idx);
        work_stop();
        deinit_local();

        dlm_proto_ops = NULL;
}

static int dlm_listen_for_all(void)
{
        struct socket *sock;
        int result;

        log_print("Using %s for communications",
                  dlm_proto_ops->name);

        result = dlm_proto_ops->listen_validate();
        if (result < 0)
                return result;

        result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
                                  SOCK_STREAM, dlm_proto_ops->proto, &sock);
        if (result < 0) {
                log_print("Can't create comms socket: %d", result);
                return result;
        }

        sock_set_mark(sock->sk, dlm_config.ci_mark);
        dlm_proto_ops->listen_sockopts(sock);

        result = dlm_proto_ops->listen_bind(sock);
        if (result < 0)
                goto out;

        lock_sock(sock->sk);
        listen_sock.sk_data_ready = sock->sk->sk_data_ready;
        listen_sock.sk_write_space = sock->sk->sk_write_space;
        listen_sock.sk_error_report = sock->sk->sk_error_report;
        listen_sock.sk_state_change = sock->sk->sk_state_change;

        listen_con.sock = sock;

        sock->sk->sk_allocation = GFP_NOFS;
        sock->sk->sk_data_ready = lowcomms_listen_data_ready;
        release_sock(sock->sk);

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                dlm_close_sock(&listen_con.sock);
                return result;
        }

        return 0;

out:
        sock_release(sock);
        return result;
}
static int dlm_tcp_bind(struct socket *sock)
{
        struct sockaddr_storage src_addr;
        int result, addr_len;

        /* Bind to our cluster-known address when connecting to avoid
         * routing problems.
         */
        memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
        make_sockaddr(&src_addr, 0, &addr_len);

        result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
                                 addr_len);
        if (result < 0) {
                /* This *may* not indicate a critical error */
                log_print("could not bind for connect: %d", result);
        }

        return 0;
}

static int dlm_tcp_connect(struct connection *con, struct socket *sock,
                           struct sockaddr *addr, int addr_len)
{
        int ret;

        ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
        switch (ret) {
        case -EINPROGRESS:
                fallthrough;
        case 0:
                return 0;
        }

        return ret;
}

static int dlm_tcp_listen_validate(void)
{
        /* We don't support multi-homed hosts */
        if (dlm_local_count > 1) {
                log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
                return -EINVAL;
        }

        return 0;
}

static void dlm_tcp_sockopts(struct socket *sock)
{
        /* Turn off Nagle's algorithm */
        tcp_sock_set_nodelay(sock->sk);
}

static void dlm_tcp_listen_sockopts(struct socket *sock)
{
        dlm_tcp_sockopts(sock);
        sock_set_reuseaddr(sock->sk);
}

static int dlm_tcp_listen_bind(struct socket *sock)
{
        int addr_len;

        /* Bind to our port */
        make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
        return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
                               addr_len);
}

static const struct dlm_proto_ops dlm_tcp_ops = {
        .name = "TCP",
        .proto = IPPROTO_TCP,
        .connect = dlm_tcp_connect,
        .sockopts = dlm_tcp_sockopts,
        .bind = dlm_tcp_bind,
        .listen_validate = dlm_tcp_listen_validate,
        .listen_sockopts = dlm_tcp_listen_sockopts,
        .listen_bind = dlm_tcp_listen_bind,
};
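/*
 * Added commentary (not from the original file): the SCTP ops below
 * differ from the TCP ops in three ways that follow directly from the
 * transport. try_new_addr is set, so nodeid_to_addr() rotates through a
 * peer's addresses between connection attempts; bind uses
 * sctp_bind_addrs() to bind every local address (multi-homing), where
 * TCP refuses multi-homed hosts in listen_validate; and connect is
 * blocking with a short send timeout, since O_NONBLOCK has no effect on
 * an SCTP connect here.
 */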
static int dlm_sctp_bind(struct socket *sock)
{
        return sctp_bind_addrs(sock, 0);
}

static int dlm_sctp_connect(struct connection *con, struct socket *sock,
                            struct sockaddr *addr, int addr_len)
{
        int ret;

        /*
         * Make sock->ops->connect() return within a bounded time, since
         * the O_NONBLOCK argument to connect() does not work here; the
         * default value of the attribute is restored afterwards.
         */
        sock_set_sndtimeo(sock->sk, 5);
        ret = sock->ops->connect(sock, addr, addr_len, 0);
        sock_set_sndtimeo(sock->sk, 0);
        if (ret < 0)
                return ret;

        if (!test_and_set_bit(CF_CONNECTED, &con->flags))
                log_print("connected to node %d", con->nodeid);

        return 0;
}

static int dlm_sctp_listen_validate(void)
{
        if (!IS_ENABLED(CONFIG_IP_SCTP)) {
                log_print("SCTP is not enabled by this kernel");
                return -EOPNOTSUPP;
        }

        request_module("sctp");
        return 0;
}

static int dlm_sctp_bind_listen(struct socket *sock)
{
        return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);
}

static void dlm_sctp_sockopts(struct socket *sock)
{
        /* Turn off Nagle's algorithm */
        sctp_sock_set_nodelay(sock->sk);
        sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
}

static const struct dlm_proto_ops dlm_sctp_ops = {
        .name = "SCTP",
        .proto = IPPROTO_SCTP,
        .try_new_addr = true,
        .connect = dlm_sctp_connect,
        .sockopts = dlm_sctp_sockopts,
        .bind = dlm_sctp_bind,
        .listen_validate = dlm_sctp_listen_validate,
        .listen_sockopts = dlm_sctp_sockopts,
        .listen_bind = dlm_sctp_bind_listen,
};

int dlm_lowcomms_start(void)
{
        int error = -EINVAL;

        init_local();
        if (!dlm_local_count) {
                error = -ENOTCONN;
                log_print("no local IP address has been set");
                goto fail;
        }

        error = work_start();
        if (error)
                goto fail_local;

        /* Start listening */
        switch (dlm_config.ci_protocol) {
        case DLM_PROTO_TCP:
                dlm_proto_ops = &dlm_tcp_ops;
                break;
        case DLM_PROTO_SCTP:
                dlm_proto_ops = &dlm_sctp_ops;
                break;
        default:
                log_print("Invalid protocol identifier %d set",
                          dlm_config.ci_protocol);
                error = -EINVAL;
                goto fail_proto_ops;
        }

        error = dlm_listen_for_all();
        if (error)
                goto fail_listen;

        return 0;

fail_listen:
        dlm_proto_ops = NULL;
fail_proto_ops:
        work_stop();
fail_local:
        deinit_local();
fail:
        return error;
}

void dlm_lowcomms_init(void)
{
        int i;

        for (i = 0; i < CONN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&connection_hash[i]);

        INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
}

void dlm_lowcomms_exit(void)
{
        struct dlm_node_addr *na, *safe;

        spin_lock(&dlm_node_addrs_spin);
        list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
                list_del(&na->list);
                while (na->addr_count--)
                        kfree(na->addr[na->addr_count]);
                kfree(na);
        }
        spin_unlock(&dlm_node_addrs_spin);
}