/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever else it needs for
 * inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 */
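
/*
 * Example (added commentary, not part of the original file): assuming
 * the usual configfs mount point and the layout created by the DLM
 * userspace tools, the transport could be switched to SCTP with
 *
 *         echo 1 > /sys/kernel/config/dlm/cluster/protocol
 *
 * on every node before any lockspace is created.  Only the 0 = TCP,
 * 1 = SCTP meaning comes from this file; the path is an assumption
 * about the userspace setup.
 */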

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

struct cbuf {
        unsigned int base;
        unsigned int len;
        unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
        cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
        return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
        cb->base = cb->len = 0;
        cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
        cb->len  -= n;
        cb->base += n;
        cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
        return cb->len == 0;
}
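
/*
 * Worked example (added commentary): with a 4096-byte buffer,
 * cbuf_init() sets mask = 0xfff.  If base = 4000 and len = 200, then
 * cbuf_data() returns (4000 + 200) & 0xfff = 104: the write position
 * has wrapped past the end of the page while the unread data still
 * starts at offset 4000.  cbuf_eat(cb, 96) would then leave len = 104
 * and advance base to (4096 & 0xfff) = 0.
 */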

struct connection {
        struct socket *sock;    /* NULL if not connected */
        uint32_t nodeid;        /* So we know who we are in the list */
        struct mutex sock_mutex;
        unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
        struct list_head writequeue;  /* List of outgoing writequeue_entries */
        spinlock_t writequeue_lock;
        int (*rx_action) (struct connection *); /* What to do when active */
        void (*connect_action) (struct connection *); /* What to do to connect */
        struct page *rx_page;
        struct cbuf cb;
        int retries;
#define MAX_CONNECT_RETRIES 3
        int sctp_assoc;
        struct hlist_node list;
        struct connection *othercon;
        struct work_struct rwork; /* Receive workqueue */
        struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
        struct list_head list;
        struct page *page;
        int offset;
        int len;
        int end;
        int users;
        struct connection *con;
};

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
        return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
        int r;
        struct hlist_node *h;
        struct connection *con;

        r = nodeid_hash(nodeid);

        hlist_for_each_entry(con, h, &connection_hash[r], list) {
                if (con->nodeid == nodeid)
                        return con;
        }
        return NULL;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
        struct connection *con = NULL;
        int r;

        con = __find_con(nodeid);
        if (con || !alloc)
                return con;

        con = kmem_cache_zalloc(con_cache, alloc);
        if (!con)
                return NULL;

        r = nodeid_hash(nodeid);
        hlist_add_head(&con->list, &connection_hash[r]);

        con->nodeid = nodeid;
        mutex_init(&con->sock_mutex);
        INIT_LIST_HEAD(&con->writequeue);
        spin_lock_init(&con->writequeue_lock);
        INIT_WORK(&con->swork, process_send_sockets);
        INIT_WORK(&con->rwork, process_recv_sockets);

        /* Setup action pointers for child sockets */
        if (con->nodeid) {
                struct connection *zerocon = __find_con(0);

                con->connect_action = zerocon->connect_action;
                if (!con->rx_action)
                        con->rx_action = zerocon->rx_action;
        }

        return con;
}
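
/*
 * Added commentary: connection 0 is the local listening connection and
 * is created before any per-node connection (see tcp_listen_for_all()
 * and sctp_listen_for_all() below), so by the time __nodeid2con() runs
 * for a real nodeid, __find_con(0) above returns the listening
 * connection whose connect/rx actions the new child inherits.
 */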

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
        int i;
        struct hlist_node *h, *n;
        struct connection *con;

        for (i = 0; i < CONN_HASH_SIZE; i++) {
                hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list) {
                        conn_func(con);
                }
        }
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
        struct connection *con;

        mutex_lock(&connections_lock);
        con = __nodeid2con(nodeid, allocation);
        mutex_unlock(&connections_lock);

        return con;
}

/* This is a bit drastic, but only called when things go wrong */
static struct connection *assoc2con(int assoc_id)
{
        int i;
        struct hlist_node *h;
        struct connection *con;

        mutex_lock(&connections_lock);

        for (i = 0; i < CONN_HASH_SIZE; i++) {
                hlist_for_each_entry(con, h, &connection_hash[i], list) {
                        if (con && con->sctp_assoc == assoc_id) {
                                mutex_unlock(&connections_lock);
                                return con;
                        }
                }
        }
        mutex_unlock(&connections_lock);
        return NULL;
}

static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
{
        struct sockaddr_storage addr;
        int error;

        if (!dlm_local_count)
                return -1;

        error = dlm_nodeid_to_addr(nodeid, &addr);
        if (error)
                return error;

        if (dlm_local_addr[0]->ss_family == AF_INET) {
                struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
                struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
                ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
        } else {
                struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
                ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
        }

        return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
        struct connection *con = sock2con(sk);
        if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
        struct connection *con = sock2con(sk);

        if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
        if (test_bit(CF_CLOSE, &con->flags))
                return;
        if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
        if (sk->sk_state == TCP_ESTABLISHED)
                lowcomms_write_space(sk);
}
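
/*
 * Added commentary: the callbacks above are installed on the socket by
 * add_sock() below and run in softirq context, so they do as little as
 * possible - recover the connection through sk_user_data (sock2con)
 * and queue work.  The actual socket I/O happens later on the dlm_recv
 * and dlm_send workqueues in process_recv_sockets() and
 * process_send_sockets().
 */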

int dlm_lowcomms_connect_node(int nodeid)
{
        struct connection *con;

        /* with sctp there's no connecting without sending */
        if (dlm_config.ci_protocol != 0)
                return 0;

        if (nodeid == dlm_our_nodeid())
                return 0;

        con = nodeid2con(nodeid, GFP_NOFS);
        if (!con)
                return -ENOMEM;
        lowcomms_connect_sock(con);
        return 0;
}

/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
        con->sock = sock;

        /* Install the socket callbacks */
        con->sock->sk->sk_data_ready = lowcomms_data_ready;
        con->sock->sk->sk_write_space = lowcomms_write_space;
        con->sock->sk->sk_state_change = lowcomms_state_change;
        con->sock->sk->sk_user_data = con;
        con->sock->sk->sk_allocation = GFP_NOFS;
        return 0;
}

/* Add the port number to an IPv6 or IPv4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
                          int *addr_len)
{
        saddr->ss_family = dlm_local_addr[0]->ss_family;
        if (saddr->ss_family == AF_INET) {
                struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
                in4_addr->sin_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in);
                memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
        } else {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
                in6_addr->sin6_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in6);
        }
        memset((char *)saddr + *addr_len, 0,
               sizeof(struct sockaddr_storage) - *addr_len);
}
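
/*
 * Illustrative example (added commentary): for an IPv4 cluster using
 * the default DLM port of 21064, make_sockaddr() on a sockaddr_storage
 * holding 192.168.1.10 yields a sockaddr_in with
 * sin_port == htons(21064) and *addr_len == sizeof(struct sockaddr_in)
 * == 16.  The trailing memset zeroes the rest of the storage,
 * presumably so that stored addresses compare equal byte-for-byte.
 */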
470 */ 471 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) { 472 log_print("COMM_UP for invalid assoc ID %d", 473 (int)sn->sn_assoc_change.sac_assoc_id); 474 sctp_init_failed(); 475 return; 476 } 477 memset(&prim, 0, sizeof(struct sctp_prim)); 478 prim_len = sizeof(struct sctp_prim); 479 prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id; 480 481 ret = kernel_getsockopt(con->sock, 482 IPPROTO_SCTP, 483 SCTP_PRIMARY_ADDR, 484 (char*)&prim, 485 &prim_len); 486 if (ret < 0) { 487 log_print("getsockopt/sctp_primary_addr on " 488 "new assoc %d failed : %d", 489 (int)sn->sn_assoc_change.sac_assoc_id, 490 ret); 491 492 /* Retry INIT later */ 493 new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id); 494 if (new_con) 495 clear_bit(CF_CONNECT_PENDING, &con->flags); 496 return; 497 } 498 make_sockaddr(&prim.ssp_addr, 0, &addr_len); 499 if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) { 500 int i; 501 unsigned char *b=(unsigned char *)&prim.ssp_addr; 502 log_print("reject connect from unknown addr"); 503 for (i=0; i<sizeof(struct sockaddr_storage);i++) 504 printk("%02x ", b[i]); 505 printk("\n"); 506 sctp_send_shutdown(prim.ssp_assoc_id); 507 return; 508 } 509 510 new_con = nodeid2con(nodeid, GFP_NOFS); 511 if (!new_con) 512 return; 513 514 /* Peel off a new sock */ 515 parg.associd = sn->sn_assoc_change.sac_assoc_id; 516 ret = kernel_getsockopt(con->sock, IPPROTO_SCTP, 517 SCTP_SOCKOPT_PEELOFF, 518 (void *)&parg, &parglen); 519 if (ret < 0) { 520 log_print("Can't peel off a socket for " 521 "connection %d to node %d: err=%d", 522 parg.associd, nodeid, ret); 523 return; 524 } 525 new_con->sock = sockfd_lookup(parg.sd, &err); 526 if (!new_con->sock) { 527 log_print("sockfd_lookup error %d", err); 528 return; 529 } 530 add_sock(new_con->sock, new_con); 531 sockfd_put(new_con->sock); 532 533 log_print("connecting to %d sctp association %d", 534 nodeid, (int)sn->sn_assoc_change.sac_assoc_id); 535 536 /* Send any pending writes */ 537 clear_bit(CF_CONNECT_PENDING, &new_con->flags); 538 clear_bit(CF_INIT_PENDING, &con->flags); 539 if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) { 540 queue_work(send_workqueue, &new_con->swork); 541 } 542 if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags)) 543 queue_work(recv_workqueue, &new_con->rwork); 544 } 545 break; 546 547 case SCTP_COMM_LOST: 548 case SCTP_SHUTDOWN_COMP: 549 { 550 con = assoc2con(sn->sn_assoc_change.sac_assoc_id); 551 if (con) { 552 con->sctp_assoc = 0; 553 } 554 } 555 break; 556 557 /* We don't know which INIT failed, so clear the PENDING flags 558 * on them all. if assoc_id is zero then it will then try 559 * again */ 560 561 case SCTP_CANT_STR_ASSOC: 562 { 563 log_print("Can't start SCTP association - retrying"); 564 sctp_init_failed(); 565 } 566 break; 567 568 default: 569 log_print("unexpected SCTP assoc change id=%d state=%d", 570 (int)sn->sn_assoc_change.sac_assoc_id, 571 sn->sn_assoc_change.sac_state); 572 } 573 } 574 } 575 576 /* Data received from remote end */ 577 static int receive_from_sock(struct connection *con) 578 { 579 int ret = 0; 580 struct msghdr msg = {}; 581 struct kvec iov[2]; 582 unsigned len; 583 int r; 584 int call_again_soon = 0; 585 int nvec; 586 char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))]; 587 588 mutex_lock(&con->sock_mutex); 589 590 if (con->sock == NULL) { 591 ret = -EAGAIN; 592 goto out_close; 593 } 594 595 if (con->rx_page == NULL) { 596 /* 597 * This doesn't need to be atomic, but I think it should 598 * improve performance if it is. 
599 */ 600 con->rx_page = alloc_page(GFP_ATOMIC); 601 if (con->rx_page == NULL) 602 goto out_resched; 603 cbuf_init(&con->cb, PAGE_CACHE_SIZE); 604 } 605 606 /* Only SCTP needs these really */ 607 memset(&incmsg, 0, sizeof(incmsg)); 608 msg.msg_control = incmsg; 609 msg.msg_controllen = sizeof(incmsg); 610 611 /* 612 * iov[0] is the bit of the circular buffer between the current end 613 * point (cb.base + cb.len) and the end of the buffer. 614 */ 615 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb); 616 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb); 617 iov[1].iov_len = 0; 618 nvec = 1; 619 620 /* 621 * iov[1] is the bit of the circular buffer between the start of the 622 * buffer and the start of the currently used section (cb.base) 623 */ 624 if (cbuf_data(&con->cb) >= con->cb.base) { 625 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); 626 iov[1].iov_len = con->cb.base; 627 iov[1].iov_base = page_address(con->rx_page); 628 nvec = 2; 629 } 630 len = iov[0].iov_len + iov[1].iov_len; 631 632 r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len, 633 MSG_DONTWAIT | MSG_NOSIGNAL); 634 if (ret <= 0) 635 goto out_close; 636 637 /* Process SCTP notifications */ 638 if (msg.msg_flags & MSG_NOTIFICATION) { 639 msg.msg_control = incmsg; 640 msg.msg_controllen = sizeof(incmsg); 641 642 process_sctp_notification(con, &msg, 643 page_address(con->rx_page) + con->cb.base); 644 mutex_unlock(&con->sock_mutex); 645 return 0; 646 } 647 BUG_ON(con->nodeid == 0); 648 649 if (ret == len) 650 call_again_soon = 1; 651 cbuf_add(&con->cb, ret); 652 ret = dlm_process_incoming_buffer(con->nodeid, 653 page_address(con->rx_page), 654 con->cb.base, con->cb.len, 655 PAGE_CACHE_SIZE); 656 if (ret == -EBADMSG) { 657 log_print("lowcomms: addr=%p, base=%u, len=%u, " 658 "iov_len=%u, iov_base[0]=%p, read=%d", 659 page_address(con->rx_page), con->cb.base, con->cb.len, 660 len, iov[0].iov_base, r); 661 } 662 if (ret < 0) 663 goto out_close; 664 cbuf_eat(&con->cb, ret); 665 666 if (cbuf_empty(&con->cb) && !call_again_soon) { 667 __free_page(con->rx_page); 668 con->rx_page = NULL; 669 } 670 671 if (call_again_soon) 672 goto out_resched; 673 mutex_unlock(&con->sock_mutex); 674 return 0; 675 676 out_resched: 677 if (!test_and_set_bit(CF_READ_PENDING, &con->flags)) 678 queue_work(recv_workqueue, &con->rwork); 679 mutex_unlock(&con->sock_mutex); 680 return -EAGAIN; 681 682 out_close: 683 mutex_unlock(&con->sock_mutex); 684 if (ret != -EAGAIN) { 685 close_connection(con, false); 686 /* Reconnect when there is something to send */ 687 } 688 /* Don't return success if we really got EOF */ 689 if (ret == 0) 690 ret = -EAGAIN; 691 692 return ret; 693 } 694 695 /* Listening socket is busy, accept a connection */ 696 static int tcp_accept_from_sock(struct connection *con) 697 { 698 int result; 699 struct sockaddr_storage peeraddr; 700 struct socket *newsock; 701 int len; 702 int nodeid; 703 struct connection *newcon; 704 struct connection *addcon; 705 706 memset(&peeraddr, 0, sizeof(peeraddr)); 707 result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, 708 IPPROTO_TCP, &newsock); 709 if (result < 0) 710 return -ENOMEM; 711 712 mutex_lock_nested(&con->sock_mutex, 0); 713 714 result = -ENOTCONN; 715 if (con->sock == NULL) 716 goto accept_err; 717 718 newsock->type = con->sock->type; 719 newsock->ops = con->sock->ops; 720 721 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK); 722 if (result < 0) 723 goto accept_err; 724 725 /* Get the connected socket's peer */ 726 

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
        int result;
        struct sockaddr_storage peeraddr;
        struct socket *newsock;
        int len;
        int nodeid;
        struct connection *newcon;
        struct connection *addcon;

        memset(&peeraddr, 0, sizeof(peeraddr));
        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &newsock);
        if (result < 0)
                return -ENOMEM;

        mutex_lock_nested(&con->sock_mutex, 0);

        result = -ENOTCONN;
        if (con->sock == NULL)
                goto accept_err;

        newsock->type = con->sock->type;
        newsock->ops = con->sock->ops;

        result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
        if (result < 0)
                goto accept_err;

        /* Get the connected socket's peer */
        memset(&peeraddr, 0, sizeof(peeraddr));
        if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
                                  &len, 2)) {
                result = -ECONNABORTED;
                goto accept_err;
        }

        /* Get the new node's NODEID */
        make_sockaddr(&peeraddr, 0, &len);
        if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
                log_print("connect from non cluster node");
                sock_release(newsock);
                mutex_unlock(&con->sock_mutex);
                return -1;
        }

        log_print("got connection from %d", nodeid);

        /* Check to see if we already have a connection to this node. This
         * could happen if the two nodes initiate a connection at roughly
         * the same time and the connections cross on the wire.
         * In this case we store the incoming one in "othercon"
         */
        newcon = nodeid2con(nodeid, GFP_NOFS);
        if (!newcon) {
                result = -ENOMEM;
                goto accept_err;
        }
        mutex_lock_nested(&newcon->sock_mutex, 1);
        if (newcon->sock) {
                struct connection *othercon = newcon->othercon;

                if (!othercon) {
                        othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
                        if (!othercon) {
                                log_print("failed to allocate incoming socket");
                                mutex_unlock(&newcon->sock_mutex);
                                result = -ENOMEM;
                                goto accept_err;
                        }
                        othercon->nodeid = nodeid;
                        othercon->rx_action = receive_from_sock;
                        mutex_init(&othercon->sock_mutex);
                        INIT_WORK(&othercon->swork, process_send_sockets);
                        INIT_WORK(&othercon->rwork, process_recv_sockets);
                        set_bit(CF_IS_OTHERCON, &othercon->flags);
                }
                if (!othercon->sock) {
                        newcon->othercon = othercon;
                        othercon->sock = newsock;
                        newsock->sk->sk_user_data = othercon;
                        add_sock(newsock, othercon);
                        addcon = othercon;
                } else {
                        printk("Extra connection from node %d attempted\n",
                               nodeid);
                        result = -EAGAIN;
                        mutex_unlock(&newcon->sock_mutex);
                        goto accept_err;
                }
        } else {
                newsock->sk->sk_user_data = newcon;
                newcon->rx_action = receive_from_sock;
                add_sock(newsock, newcon);
                addcon = newcon;
        }

        mutex_unlock(&newcon->sock_mutex);

        /*
         * Add it to the active queue in case we got data
         * between processing the accept and adding the socket
         * to the read_sockets list
         */
        if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
                queue_work(recv_workqueue, &addcon->rwork);
        mutex_unlock(&con->sock_mutex);

        return 0;

accept_err:
        mutex_unlock(&con->sock_mutex);
        sock_release(newsock);

        if (result != -EAGAIN)
                log_print("error accepting connection from node: %d", result);
        return result;
}

static void free_entry(struct writequeue_entry *e)
{
        __free_page(e->page);
        kfree(e);
}
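
/*
 * Added commentary: tcp_accept_from_sock() takes the listening
 * connection's sock_mutex at lockdep nesting level 0 and the per-node
 * connection's at level 1.  Every sock_mutex is initialised at the same
 * mutex_init() call site, so they share one lockdep class; without the
 * mutex_lock_nested() annotation, lockdep would flag this correctly
 * ordered double acquisition as a possible deadlock.
 */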
826 */ 827 static void sctp_init_assoc(struct connection *con) 828 { 829 struct sockaddr_storage rem_addr; 830 char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))]; 831 struct msghdr outmessage; 832 struct cmsghdr *cmsg; 833 struct sctp_sndrcvinfo *sinfo; 834 struct connection *base_con; 835 struct writequeue_entry *e; 836 int len, offset; 837 int ret; 838 int addrlen; 839 struct kvec iov[1]; 840 841 if (test_and_set_bit(CF_INIT_PENDING, &con->flags)) 842 return; 843 844 if (con->retries++ > MAX_CONNECT_RETRIES) 845 return; 846 847 if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) { 848 log_print("no address for nodeid %d", con->nodeid); 849 return; 850 } 851 base_con = nodeid2con(0, 0); 852 BUG_ON(base_con == NULL); 853 854 make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen); 855 856 outmessage.msg_name = &rem_addr; 857 outmessage.msg_namelen = addrlen; 858 outmessage.msg_control = outcmsg; 859 outmessage.msg_controllen = sizeof(outcmsg); 860 outmessage.msg_flags = MSG_EOR; 861 862 spin_lock(&con->writequeue_lock); 863 864 if (list_empty(&con->writequeue)) { 865 spin_unlock(&con->writequeue_lock); 866 log_print("writequeue empty for nodeid %d", con->nodeid); 867 return; 868 } 869 870 e = list_first_entry(&con->writequeue, struct writequeue_entry, list); 871 len = e->len; 872 offset = e->offset; 873 spin_unlock(&con->writequeue_lock); 874 875 /* Send the first block off the write queue */ 876 iov[0].iov_base = page_address(e->page)+offset; 877 iov[0].iov_len = len; 878 879 cmsg = CMSG_FIRSTHDR(&outmessage); 880 cmsg->cmsg_level = IPPROTO_SCTP; 881 cmsg->cmsg_type = SCTP_SNDRCV; 882 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 883 sinfo = CMSG_DATA(cmsg); 884 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 885 sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid()); 886 outmessage.msg_controllen = cmsg->cmsg_len; 887 888 ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len); 889 if (ret < 0) { 890 log_print("Send first packet to node %d failed: %d", 891 con->nodeid, ret); 892 893 /* Try again later */ 894 clear_bit(CF_CONNECT_PENDING, &con->flags); 895 clear_bit(CF_INIT_PENDING, &con->flags); 896 } 897 else { 898 spin_lock(&con->writequeue_lock); 899 e->offset += ret; 900 e->len -= ret; 901 902 if (e->len == 0 && e->users == 0) { 903 list_del(&e->list); 904 free_entry(e); 905 } 906 spin_unlock(&con->writequeue_lock); 907 } 908 } 909 910 /* Connect a new socket to its peer */ 911 static void tcp_connect_to_sock(struct connection *con) 912 { 913 int result = -EHOSTUNREACH; 914 struct sockaddr_storage saddr, src_addr; 915 int addr_len; 916 struct socket *sock = NULL; 917 918 if (con->nodeid == 0) { 919 log_print("attempt to connect sock 0 foiled"); 920 return; 921 } 922 923 mutex_lock(&con->sock_mutex); 924 if (con->retries++ > MAX_CONNECT_RETRIES) 925 goto out; 926 927 /* Some odd races can cause double-connects, ignore them */ 928 if (con->sock) { 929 result = 0; 930 goto out; 931 } 932 933 /* Create a socket to communicate with */ 934 result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, 935 IPPROTO_TCP, &sock); 936 if (result < 0) 937 goto out_err; 938 939 memset(&saddr, 0, sizeof(saddr)); 940 if (dlm_nodeid_to_addr(con->nodeid, &saddr)) 941 goto out_err; 942 943 sock->sk->sk_user_data = con; 944 con->rx_action = receive_from_sock; 945 con->connect_action = tcp_connect_to_sock; 946 add_sock(sock, con); 947 948 /* Bind to our cluster-known address connecting to avoid 949 routing problems */ 950 memcpy(&src_addr, dlm_local_addr[0], 

static struct socket *tcp_create_listen_sock(struct connection *con,
                                             struct sockaddr_storage *saddr)
{
        struct socket *sock = NULL;
        int result = 0;
        int one = 1;
        int addr_len;

        if (dlm_local_addr[0]->ss_family == AF_INET)
                addr_len = sizeof(struct sockaddr_in);
        else
                addr_len = sizeof(struct sockaddr_in6);

        /* Create a socket to communicate with */
        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &sock);
        if (result < 0) {
                log_print("Can't create listening comms socket");
                goto create_out;
        }

        result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                                   (char *)&one, sizeof(one));
        if (result < 0) {
                log_print("Failed to set SO_REUSEADDR on socket: %d", result);
        }
        sock->sk->sk_user_data = con;
        con->rx_action = tcp_accept_from_sock;
        con->connect_action = tcp_connect_to_sock;
        con->sock = sock;

        /* Bind to our port */
        make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
        result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
        if (result < 0) {
                log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                con->sock = NULL;
                goto create_out;
        }
        result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                                   (char *)&one, sizeof(one));
        if (result < 0) {
                log_print("Set keepalive failed: %d", result);
        }

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                goto create_out;
        }

create_out:
        return sock;
}

/* Get local addresses */
static void init_local(void)
{
        struct sockaddr_storage sas, *addr;
        int i;

        dlm_local_count = 0;
        for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
                if (dlm_our_addr(&sas, i))
                        break;

                addr = kmalloc(sizeof(*addr), GFP_NOFS);
                if (!addr)
                        break;
                memcpy(addr, &sas, sizeof(*addr));
                dlm_local_addr[dlm_local_count++] = addr;
        }
}

/* Bind to an IP address. SCTP allows multiple addresses so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
                              struct sockaddr_storage *addr,
                              int addr_len, int num)
{
        int result = 0;

        if (num == 1)
                result = kernel_bind(sctp_con->sock,
                                     (struct sockaddr *) addr,
                                     addr_len);
        else
                result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
                                           SCTP_SOCKOPT_BINDX_ADD,
                                           (char *)addr, addr_len);

        if (result < 0)
                log_print("Can't bind to port %d addr number %d",
                          dlm_config.ci_tcp_port, num);

        return result;
}
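
/*
 * Added commentary: the first local address is attached with a plain
 * kernel_bind(), which also fixes the port; each further address is
 * added to the same socket via the SCTP_SOCKOPT_BINDX_ADD option.
 * This is how a multi-homed node advertises all of its interfaces
 * within a single SCTP endpoint.
 */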

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
        struct socket *sock = NULL;
        struct sockaddr_storage localaddr;
        struct sctp_event_subscribe subscribe;
        int result = -EINVAL, num = 1, i, addr_len;
        struct connection *con = nodeid2con(0, GFP_NOFS);
        int bufsize = NEEDED_RMEM;

        if (!con)
                return -ENOMEM;

        log_print("Using SCTP for communications");

        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
                                  IPPROTO_SCTP, &sock);
        if (result < 0) {
                log_print("Can't create comms socket, check SCTP is loaded");
                goto out;
        }

        /* Listen for events */
        memset(&subscribe, 0, sizeof(subscribe));
        subscribe.sctp_data_io_event = 1;
        subscribe.sctp_association_event = 1;
        subscribe.sctp_send_failure_event = 1;
        subscribe.sctp_shutdown_event = 1;
        subscribe.sctp_partial_delivery_event = 1;

        result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
                                   (char *)&bufsize, sizeof(bufsize));
        if (result)
                log_print("Error increasing buffer space on socket %d", result);

        result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
                                   (char *)&subscribe, sizeof(subscribe));
        if (result < 0) {
                log_print("Failed to set SCTP_EVENTS on socket: result=%d",
                          result);
                goto create_delsock;
        }

        /* Init con struct */
        sock->sk->sk_user_data = con;
        con->sock = sock;
        con->sock->sk->sk_data_ready = lowcomms_data_ready;
        con->rx_action = receive_from_sock;
        con->connect_action = sctp_init_assoc;

        /* Bind to all interfaces. */
        for (i = 0; i < dlm_local_count; i++) {
                memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
                make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

                result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
                if (result)
                        goto create_delsock;
                ++num;
        }

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                log_print("Can't set socket listening");
                goto create_delsock;
        }

        return 0;

create_delsock:
        sock_release(sock);
        con->sock = NULL;
out:
        return result;
}

static int tcp_listen_for_all(void)
{
        struct socket *sock = NULL;
        struct connection *con = nodeid2con(0, GFP_NOFS);
        int result = -EINVAL;

        if (!con)
                return -ENOMEM;

        /* We don't support multi-homed hosts */
        if (dlm_local_addr[1] != NULL) {
                log_print("TCP protocol can't handle multi-homed hosts, "
                          "try SCTP");
                return -EINVAL;
        }

        log_print("Using TCP for communications");

        sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
        if (sock) {
                add_sock(sock, con);
                result = 0;
        } else {
                result = -EADDRINUSE;
        }

        return result;
}
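
/*
 * Added commentary: the SCTP_EVENTS subscription above is what makes
 * the stack deliver association change notifications (SCTP_COMM_UP,
 * SCTP_COMM_LOST, ...) in-band on the socket.  receive_from_sock()
 * recognises them by MSG_NOTIFICATION in msg_flags and hands them to
 * process_sctp_notification() instead of the normal data path.
 */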

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
                                                     gfp_t allocation)
{
        struct writequeue_entry *entry;

        entry = kmalloc(sizeof(struct writequeue_entry), allocation);
        if (!entry)
                return NULL;

        entry->page = alloc_page(allocation);
        if (!entry->page) {
                kfree(entry);
                return NULL;
        }

        entry->offset = 0;
        entry->len = 0;
        entry->end = 0;
        entry->users = 0;
        entry->con = con;

        return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
        struct connection *con;
        struct writequeue_entry *e;
        int offset = 0;
        int users = 0;

        con = nodeid2con(nodeid, allocation);
        if (!con)
                return NULL;

        spin_lock(&con->writequeue_lock);
        e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
        if ((&e->list == &con->writequeue) ||
            (PAGE_CACHE_SIZE - e->end < len)) {
                e = NULL;
        } else {
                offset = e->end;
                e->end += len;
                users = e->users++;
        }
        spin_unlock(&con->writequeue_lock);

        if (e) {
        got_one:
                *ppc = page_address(e->page) + offset;
                return e;
        }

        e = new_writequeue_entry(con, allocation);
        if (e) {
                spin_lock(&con->writequeue_lock);
                offset = e->end;
                e->end += len;
                users = e->users++;
                list_add_tail(&e->list, &con->writequeue);
                spin_unlock(&con->writequeue_lock);
                goto got_one;
        }
        return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
        struct writequeue_entry *e = (struct writequeue_entry *)mh;
        struct connection *con = e->con;
        int users;

        spin_lock(&con->writequeue_lock);
        users = --e->users;
        if (users)
                goto out;
        e->len = e->end - e->offset;
        spin_unlock(&con->writequeue_lock);

        if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
                queue_work(send_workqueue, &con->swork);
        }
        return;

out:
        spin_unlock(&con->writequeue_lock);
        return;
}
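
/*
 * Usage sketch (added commentary, illustrative only): a caller such as
 * the midcomms layer reserves space, builds the message in place, and
 * then commits it:
 *
 *         char *p;
 *         void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
 *         if (!mh)
 *                 return -ENOMEM;
 *         memcpy(p, msg, len);    (build the message directly in p)
 *         dlm_lowcomms_commit_buffer(mh);
 *
 * Nothing is transmitted until the commit drops e->users to zero and
 * kicks the send workqueue; this lets several small messages for the
 * same node share one page and go out together.
 */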

/* Send a message */
static void send_to_sock(struct connection *con)
{
        int ret = 0;
        const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        struct writequeue_entry *e;
        int len, offset;

        mutex_lock(&con->sock_mutex);
        if (con->sock == NULL)
                goto out_connect;

        spin_lock(&con->writequeue_lock);
        for (;;) {
                e = list_entry(con->writequeue.next, struct writequeue_entry,
                               list);
                if ((struct list_head *) e == &con->writequeue)
                        break;

                len = e->len;
                offset = e->offset;
                BUG_ON(len == 0 && e->users == 0);
                spin_unlock(&con->writequeue_lock);

                ret = 0;
                if (len) {
                        ret = kernel_sendpage(con->sock, e->page, offset, len,
                                              msg_flags);
                        if (ret == -EAGAIN || ret == 0) {
                                cond_resched();
                                goto out;
                        }
                        if (ret <= 0)
                                goto send_error;
                }
                /* Don't starve people filling buffers */
                cond_resched();

                spin_lock(&con->writequeue_lock);
                e->offset += ret;
                e->len -= ret;

                if (e->len == 0 && e->users == 0) {
                        list_del(&e->list);
                        free_entry(e);
                        continue;
                }
        }
        spin_unlock(&con->writequeue_lock);
out:
        mutex_unlock(&con->sock_mutex);
        return;

send_error:
        mutex_unlock(&con->sock_mutex);
        close_connection(con, false);
        lowcomms_connect_sock(con);
        return;

out_connect:
        mutex_unlock(&con->sock_mutex);
        if (!test_bit(CF_INIT_PENDING, &con->flags))
                lowcomms_connect_sock(con);
        return;
}

static void clean_one_writequeue(struct connection *con)
{
        struct writequeue_entry *e, *safe;

        spin_lock(&con->writequeue_lock);
        list_for_each_entry_safe(e, safe, &con->writequeue, list) {
                list_del(&e->list);
                free_entry(e);
        }
        spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
        struct connection *con;

        log_print("closing connection to node %d", nodeid);
        con = nodeid2con(nodeid, 0);
        if (con) {
                clear_bit(CF_CONNECT_PENDING, &con->flags);
                clear_bit(CF_WRITE_PENDING, &con->flags);
                set_bit(CF_CLOSE, &con->flags);
                if (cancel_work_sync(&con->swork))
                        log_print("canceled swork for node %d", nodeid);
                if (cancel_work_sync(&con->rwork))
                        log_print("canceled rwork for node %d", nodeid);
                clean_one_writequeue(con);
                close_connection(con, true);
        }
        return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, rwork);
        int err;

        clear_bit(CF_READ_PENDING, &con->flags);
        do {
                err = con->rx_action(con);
        } while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, swork);

        if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
                con->connect_action(con);
                set_bit(CF_WRITE_PENDING, &con->flags);
        }
        if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
                send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
        foreach_conn(clean_one_writequeue);
}

static void work_stop(void)
{
        destroy_workqueue(recv_workqueue);
        destroy_workqueue(send_workqueue);
}
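
/*
 * Added commentary: dlm_recv is created below as a normal (per-CPU)
 * workqueue so receive work for different connections can run
 * concurrently, while dlm_send is single-threaded.  A single sender
 * matches the design described at the top of this file: one thread
 * drains each write queue in order and can batch messages bound for
 * the same node.
 */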

static int work_start(void)
{
        /*
         * create_workqueue() returns NULL on failure (not an ERR_PTR),
         * so check for that rather than IS_ERR().
         */
        recv_workqueue = create_workqueue("dlm_recv");
        if (!recv_workqueue) {
                log_print("can't start dlm_recv");
                return -ENOMEM;
        }

        send_workqueue = create_singlethread_workqueue("dlm_send");
        if (!send_workqueue) {
                log_print("can't start dlm_send");
                destroy_workqueue(recv_workqueue);
                return -ENOMEM;
        }

        return 0;
}

static void stop_conn(struct connection *con)
{
        con->flags |= 0x0F;
        if (con->sock && con->sock->sk)
                con->sock->sk->sk_user_data = NULL;
}

static void free_conn(struct connection *con)
{
        close_connection(con, true);
        if (con->othercon)
                kmem_cache_free(con_cache, con->othercon);
        hlist_del(&con->list);
        kmem_cache_free(con_cache, con);
}

void dlm_lowcomms_stop(void)
{
        /* Set all the flags to prevent any
           socket activity.
        */
        mutex_lock(&connections_lock);
        foreach_conn(stop_conn);
        mutex_unlock(&connections_lock);

        work_stop();

        mutex_lock(&connections_lock);
        clean_writequeues();

        foreach_conn(free_conn);

        mutex_unlock(&connections_lock);
        kmem_cache_destroy(con_cache);
}

int dlm_lowcomms_start(void)
{
        int error = -EINVAL;
        struct connection *con;
        int i;

        for (i = 0; i < CONN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&connection_hash[i]);

        init_local();
        if (!dlm_local_count) {
                error = -ENOTCONN;
                log_print("no local IP address has been set");
                goto out;
        }

        error = -ENOMEM;
        con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
                                      __alignof__(struct connection), 0,
                                      NULL);
        if (!con_cache)
                goto out;

        /* Start listening */
        if (dlm_config.ci_protocol == 0)
                error = tcp_listen_for_all();
        else
                error = sctp_listen_for_all();
        if (error)
                goto fail_unlisten;

        error = work_start();
        if (error)
                goto fail_unlisten;

        return 0;

fail_unlisten:
        con = nodeid2con(0, 0);
        if (con) {
                close_connection(con, false);
                kmem_cache_free(con_cache, con);
        }
        kmem_cache_destroy(con_cache);

out:
        return error;
}