// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
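/*
 * For illustration only: the 'protocol' knob lives in the dlm configfs
 * hierarchy, which is normally managed by cluster software such as
 * dlm_controld, so driving it by hand as below is an assumption about a
 * typical setup rather than the supported interface:
 *
 *	echo 0 > /sys/kernel/config/dlm/cluster/protocol	(TCP, default)
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	(SCTP)
 *
 * See fs/dlm/config.c for the full set of cluster attributes.
 */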
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
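/*
 * Illustrative example of the cbuf helpers above, assuming a 4096-byte
 * (one page) buffer so mask == 0xfff:
 *
 *	struct cbuf cb;
 *
 *	cbuf_init(&cb, 4096);	// base = 0, len = 0
 *	cbuf_add(&cb, 100);	// 100 bytes received, cbuf_data() == 100
 *	cbuf_eat(&cb, 60);	// 60 bytes consumed: base = 60, len = 40
 *
 * base and cbuf_data() wrap modulo the buffer size, which is why
 * receive_from_sock() below may have to read into two separate regions
 * of the page.
 */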
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}
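/*
 * Illustrative note on the address rotation below: a node may have been
 * registered with several addresses (SCTP multi-homing). With, say,
 * addr_count == 2 and curr_addr_index == 0, a lookup with try_new_addr
 * set returns addr[0] and advances the index, so the next connect
 * attempt uses addr[1], wrapping back to addr[0] afterwards.
 */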
static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}
static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %u.%u.%u.%u, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
				   sin6->sin6_addr.s6_addr32[1],
				   sin6->sin6_addr.s6_addr32[2],
				   sin6->sin6_addr.s6_addr32[3],
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}
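/*
 * Why the callbacks are saved below: sockets created by accept() inherit
 * the listening socket's sk callbacks, so the originals are stashed in
 * listen_sock once, before add_sock() overwrites them. restore_callbacks()
 * can then put them back when a connection is torn down, and
 * lowcomms_error_report() above chains to the saved sk_error_report.
 */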
/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0,
	       sizeof(struct sockaddr_storage) - *addr_len);
}
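/*
 * Rough example of make_sockaddr() above: for an IPv4 local address and
 * port 21064 (the usual dlm default), it writes sin_port = htons(21064),
 * sets *addr_len = sizeof(struct sockaddr_in), and zeroes the tail of
 * the sockaddr_storage so that structures can be safely memcmp'd or
 * passed through addr_compare().
 */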
/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	if (con->sock) {
		restore_callbacks(con->sock);
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}
	if (con->nodeid == 0) {
		ret = -EINVAL;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;
	iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);

	r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;
	else if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
			  page_address(con->rx_page), con->cb.base,
			  con->cb.len, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, true, true, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}
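/*
 * Worked example of the "othercon" case handled below: nodes 2 and 3
 * both queue messages for each other at the same time, so each initiates
 * an outgoing connection while also accepting one from its peer. The
 * actively initiated socket stays on the main connection struct; the
 * accepted one is parked on othercon and is only ever used for
 * receiving, which keeps exactly one send path per peer.
 */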
/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock = NULL;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return -ENOTCONN;
	}

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;

		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/*  Check to see if we already have a connection to this node. This
	 *  could happen if the two nodes initiate a connection at roughly
	 *  the same time and the connections cross on the wire.
	 *  In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_LIST_HEAD(&othercon->writequeue);
			spin_lock_init(&othercon->writequeue_lock);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n",
			       nodeid);
			result = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}
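/*
 * The writequeue entries handled below are reference counted:
 * dlm_lowcomms_get_buffer() bumps e->users while a caller is filling its
 * slice of the page, and dlm_lowcomms_commit_buffer() drops it again. An
 * entry can therefore only be freed, via free_entry() or
 * writequeue_entry_complete(), once every byte has been sent and no
 * writer still holds a slot.
 */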
static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
	struct sockaddr_storage localaddr;
	struct sockaddr *addr = (struct sockaddr *)&localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(con->sock, addr, addr_len);
		else
			result = sock_bind_add(con->sock->sk, addr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}
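/*
 * Example of what sctp_bind_addrs() above achieves: with two local
 * addresses, say 10.0.0.2 and 192.168.0.2, the first is attached with
 * kernel_bind() and the second with sock_bind_add(), so a single SCTP
 * socket ends up bound to both and the association can fail over between
 * interfaces without dlm noticing.
 */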
/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int result;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	con->rx_action = receive_from_sock;
	con->connect_action = sctp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	sctp_sock_set_nodelay(sock->sk);

	/*
	 * Make sock->ops->connect() return within a bounded time:
	 * O_NONBLOCK does not work for an SCTP connect here, so use a
	 * 5 second send timeout for the connect call and restore the
	 * default (no timeout) afterwards.
	 */
	sock_set_sndtimeo(sock->sk, 5);
	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				    0);
	sock_set_sndtimeo(sock->sk, 0);

	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}
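/*
 * The TCP connect below is non-blocking: sock->ops->connect() called
 * with O_NONBLOCK typically returns -EINPROGRESS, which is treated as
 * success here. Completion is signalled later through
 * lowcomms_state_change() when the socket reaches TCP_ESTABLISHED, at
 * which point the write space callback kicks the send work.
 */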
/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	int result;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address when connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	sock_set_reuseaddr(sock->sk);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->rx_action = accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	sock_set_keepalive(sock->sk);

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}
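/*
 * Listening differs per transport: sctp_listen_for_all() below binds a
 * single SOCK_STREAM SCTP socket to every local address found by
 * init_local() (multi-homing), while tcp_listen_for_all() supports
 * exactly one local address and refuses to run on multi-homed hosts.
 */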
/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	int result = -EINVAL;
	struct connection *con = nodeid2con(0, GFP_NOFS);

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
	sctp_sock_set_nodelay(sock->sk);

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* Init con struct */
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = accept_from_sock;
	con->connect_action = sctp_connect_to_sock;

	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
		goto create_delsock;

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	} else {
		result = -EADDRINUSE;
	}

	return result;
}


static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
}
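/*
 * Sketch of the caller contract for the two exports above (midcomms is
 * the real user; 'msg' and 'mb_len' below merely stand in for building
 * an actual dlm message):
 *
 *	char *p;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &p);
 *
 *	if (!mh)
 *		return -ENOBUFS;
 *	memcpy(p, msg, mb_len);		// fill the reserved slice
 *	dlm_lowcomms_commit_buffer(mh);	// drop users, kick send work
 *
 * get_buffer reserves len bytes in the tail page of the node's write
 * queue (allocating a fresh page if the request doesn't fit), and commit
 * makes the data visible to send_to_sock().
 */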
/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, true, false, true);
	/* Requeue the send work. When the work daemon runs again, it will try
	   a new connection, then call this function again. */
	queue_work(send_workqueue, &con->swork);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		set_bit(CF_CLOSE, &con->flags);
		close_connection(con, true, true, true);
		clean_one_writequeue(con);
	}

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* not mutex protected so check it inside too */
		con->connect_action(con);
	if (!list_empty(&con->writequeue))
		send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	foreach_conn(clean_one_writequeue);
}
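/*
 * The workqueues created below in work_start() use WQ_MEM_RECLAIM, which
 * guarantees a rescuer thread so send/receive work can make forward
 * progress even under memory pressure. That matters here because dlm may
 * sit on the writeback path of filesystems such as gfs2, where blocking
 * on an unsendable lock message could deadlock reclaim.
 */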
static void work_stop(void)
{
	if (recv_workqueue)
		destroy_workqueue(recv_workqueue);
	if (send_workqueue)
		destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

static void _stop_conn(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);
	set_bit(CF_CLOSE, &con->flags);
	set_bit(CF_READ_PENDING, &con->flags);
	set_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock && con->sock->sk) {
		write_lock_bh(&con->sock->sk->sk_callback_lock);
		con->sock->sk->sk_user_data = NULL;
		write_unlock_bh(&con->sock->sk->sk_callback_lock);
	}
	if (con->othercon && and_other)
		_stop_conn(con->othercon, false);
	mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
	_stop_conn(con, true);
}

static void free_conn(struct connection *con)
{
	close_connection(con, true, true, true);
	if (con->othercon)
		kmem_cache_free(con_cache, con->othercon);
	hlist_del(&con->list);
	kmem_cache_free(con_cache, con);
}

static void work_flush(void)
{
	int ok;
	int i;
	struct hlist_node *n;
	struct connection *con;

	if (recv_workqueue)
		flush_workqueue(recv_workqueue);
	if (send_workqueue)
		flush_workqueue(send_workqueue);
	do {
		ok = 1;
		foreach_conn(stop_conn);
		if (recv_workqueue)
			flush_workqueue(recv_workqueue);
		if (send_workqueue)
			flush_workqueue(send_workqueue);
		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
			hlist_for_each_entry_safe(con, n,
						  &connection_hash[i], list) {
				ok &= test_bit(CF_READ_PENDING, &con->flags);
				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
				if (con->othercon) {
					ok &= test_bit(CF_READ_PENDING,
						       &con->othercon->flags);
					ok &= test_bit(CF_WRITE_PENDING,
						       &con->othercon->flags);
				}
			}
		}
	} while (!ok);
}
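/*
 * Shutdown order in dlm_lowcomms_stop() below, roughly: forbid new
 * connections (dlm_allow_conn), push every connection through
 * work_flush()/stop_conn() until no read or write work is pending, drop
 * the queued writequeue entries, free the connection structs, and only
 * then destroy the workqueues and the connection cache.
 */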
void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any socket activity. */
	mutex_lock(&connections_lock);
	dlm_allow_conn = 0;
	mutex_unlock(&connections_lock);
	work_flush();
	clean_writequeues();
	foreach_conn(free_conn);
	work_stop();

	kmem_cache_destroy(con_cache);
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto fail;

	error = work_start();
	if (error)
		goto fail_destroy;

	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false, true, true);
		kmem_cache_free(con_cache, con);
	}
fail_destroy:
	kmem_cache_destroy(con_cache);
fail:
	return error;
}

void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *safe;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}