/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 */
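/*
 * Illustrative only: with the userspace cluster tools, the 'protocol'
 * value normally reaches dlm_config.ci_protocol through dlm's configfs
 * interface.  The exact path depends on the tool version; something
 * like the following is typical:
 *
 *	echo 0 > /sys/kernel/config/dlm/cluster/protocol	(TCP, default)
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	(SCTP)
 *
 * It must be written identically on every node before lockspaces are
 * created.
 */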
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)

/*
 * A cbuf indexes a circular buffer (one page in practice): 'base' is
 * where unconsumed data starts, 'len' is how much of it there is, and
 * 'mask' is size-1, so the size must be a power of two.
 */
struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

/* Offset just past the valid data, wrapped to the buffer size */
static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

/* Consume n bytes from the front of the buffer */
static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
	struct list_head writequeue;	/* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	int sctp_assoc;
	struct connection *othercon;
	struct work_struct rwork;	/* Receive workqueue */
	struct work_struct swork;	/* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static DEFINE_IDR(connections_idr);
static DEFINE_MUTEX(connections_lock);
static int max_nodeid;
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
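/*
 * A note on connections_idr, as used by __nodeid2con() below: with the
 * old two-step idr API, idr_pre_get() preallocates memory so that the
 * actual insertion cannot fail for lack of it.  A minimal sketch of the
 * pattern (illustrative only, not called anywhere in this file):
 *
 *	if (!idr_pre_get(&some_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	r = idr_get_new_above(&some_idr, ptr, wanted_id, &got_id);
 *	if (r)
 *		return r;
 *	if (got_id != wanted_id)
 *		idr_remove(&some_idr, got_id);	// wanted slot already taken
 *
 * idr_get_new_above() only guarantees an id >= wanted_id, which is why
 * __nodeid2con() has to check the returned id and back out on mismatch.
 */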
/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;
	int n;

	con = idr_find(&connections_idr, nodeid);
	if (con || !alloc)
		return con;

	r = idr_pre_get(&connections_idr, alloc);
	if (!r)
		return NULL;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = idr_get_new_above(&connections_idr, con, nodeid, &n);
	if (r) {
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	if (n != nodeid) {
		idr_remove(&connections_idr, n);
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = idr_find(&connections_idr, 0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	if (nodeid > max_nodeid)
		max_nodeid = nodeid;

	return con;
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

/* This is a bit drastic, but only called when things go wrong */
static struct connection *assoc2con(int assoc_id)
{
	int i;
	struct connection *con;

	mutex_lock(&connections_lock);
	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con && con->sctp_assoc == assoc_id) {
			mutex_unlock(&connections_lock);
			return con;
		}
	}
	mutex_unlock(&connections_lock);
	return NULL;
}

static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
{
	struct sockaddr_storage addr;
	int error;

	if (!dlm_local_count)
		return -1;

	error = dlm_nodeid_to_addr(nodeid, &addr);
	if (error)
		return error;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
		memcpy(&ret6->sin6_addr, &in6->sin6_addr,
		       sizeof(in6->sin6_addr));
	}

	return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		lowcomms_write_space(sk);
}
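/*
 * Receive-path flow, for orientation (a sketch of what the callbacks
 * above set in motion; all names are the real ones from this file):
 *
 *	sk->sk_data_ready = lowcomms_data_ready
 *	    -> set CF_READ_PENDING, queue_work(recv_workqueue, &con->rwork)
 *	        -> process_recv_sockets()
 *	            -> con->rx_action(con), i.e. receive_from_sock(),
 *	               or tcp_accept_from_sock() for the listening socket
 *
 * The write path is symmetrical via lowcomms_write_space(),
 * CF_WRITE_PENDING and process_send_sockets().
 */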
/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
	con->sock = sock;

	/* Install a data_ready callback */
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->sock->sk->sk_write_space = lowcomms_write_space;
	con->sock->sk->sk_state_change = lowcomms_state_change;
	con->sock->sk->sk_user_data = con;
	return 0;
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0,
	       sizeof(struct sockaddr_storage) - *addr_len);
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);

	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
}

/* We only send shutdown messages to nodes that are not part of the cluster */
static void sctp_send_shutdown(sctp_assoc_t associd)
{
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;
	struct connection *con;

	con = nodeid2con(0, 0);
	BUG_ON(con == NULL);

	outmessage.msg_name = NULL;
	outmessage.msg_namelen = 0;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outmessage.msg_controllen = cmsg->cmsg_len;
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

	sinfo->sinfo_flags |= MSG_EOF;
	sinfo->sinfo_assoc_id = associd;

	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);

	if (ret != 0)
		log_print("send EOF to node failed: %d", ret);
}

/* INIT failed but we don't know which node...
   restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
	int i;
	struct connection *con;

	mutex_lock(&connections_lock);
	for (i = 1; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (!con)
			continue;
		con->sctp_assoc = 0;
		if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
			if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
				queue_work(send_workqueue, &con->swork);
			}
		}
	}
	mutex_unlock(&connections_lock);
}

/* Something happened to an association */
static void process_sctp_notification(struct connection *con,
				      struct msghdr *msg, char *buf)
{
	union sctp_notification *sn = (union sctp_notification *)buf;

	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
		switch (sn->sn_assoc_change.sac_state) {

		case SCTP_COMM_UP:
		case SCTP_RESTART:
		{
			/* Check that the new node is in the lockspace */
			struct sctp_prim prim;
			int nodeid;
			int prim_len, ret;
			int addr_len;
			struct connection *new_con;
			struct file *file;
			sctp_peeloff_arg_t parg;
			int parglen = sizeof(parg);

			/*
			 * We get this before any data for an association.
			 * We verify that the node is in the cluster and
			 * then peel off a socket for it.
			 */
			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
				log_print("COMM_UP for invalid assoc ID %d",
					  (int)sn->sn_assoc_change.sac_assoc_id);
				sctp_init_failed();
				return;
			}
			memset(&prim, 0, sizeof(struct sctp_prim));
			prim_len = sizeof(struct sctp_prim);
			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

			ret = kernel_getsockopt(con->sock,
						IPPROTO_SCTP,
						SCTP_PRIMARY_ADDR,
						(char *)&prim,
						&prim_len);
			if (ret < 0) {
				log_print("getsockopt/sctp_primary_addr on "
					  "new assoc %d failed : %d",
					  (int)sn->sn_assoc_change.sac_assoc_id,
					  ret);

				/* Retry INIT later */
				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
				if (new_con)
					clear_bit(CF_CONNECT_PENDING, &con->flags);
				return;
			}
			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
			if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
				int i;
				unsigned char *b = (unsigned char *)&prim.ssp_addr;

				log_print("reject connect from unknown addr");
				for (i = 0; i < sizeof(struct sockaddr_storage); i++)
					printk("%02x ", b[i]);
				printk("\n");
				sctp_send_shutdown(prim.ssp_assoc_id);
				return;
			}

			new_con = nodeid2con(nodeid, GFP_KERNEL);
			if (!new_con)
				return;

			/* Peel off a new sock */
			parg.associd = sn->sn_assoc_change.sac_assoc_id;
			ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
						SCTP_SOCKOPT_PEELOFF,
						(void *)&parg, &parglen);
			if (ret) {
				log_print("Can't peel off a socket for "
					  "connection %d to node %d: err=%d",
					  parg.associd, nodeid, ret);
			}
			file = fget(parg.sd);
			new_con->sock = SOCKET_I(file->f_dentry->d_inode);
			add_sock(new_con->sock, new_con);
			fput(file);
			put_unused_fd(parg.sd);

			log_print("got new/restarted association %d nodeid %d",
				  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);

			/* Send any pending writes */
			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
			clear_bit(CF_INIT_PENDING, &con->flags);
			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
				queue_work(send_workqueue, &new_con->swork);
			}
			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
				queue_work(recv_workqueue, &new_con->rwork);
		}
		break;

		case SCTP_COMM_LOST:
		case SCTP_SHUTDOWN_COMP:
		{
			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
			if (con) {
				con->sctp_assoc = 0;
			}
		}
		break;

		/* We don't know which INIT failed, so clear the PENDING flags
		 * on them all.  If assoc_id is zero then it will try
		 * again */

		case SCTP_CANT_STR_ASSOC:
		{
			log_print("Can't start SCTP association - retrying");
			sctp_init_failed();
		}
		break;

		default:
			log_print("unexpected SCTP assoc change id=%d state=%d",
				  (int)sn->sn_assoc_change.sac_assoc_id,
				  sn->sn_assoc_change.sac_state);
		}
	}
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;
	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
	}

	/* Only SCTP needs these really */
	memset(&incmsg, 0, sizeof(incmsg));
	msg.msg_control = incmsg;
	msg.msg_controllen = sizeof(incmsg);

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
				 MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;

	/* Process SCTP notifications */
	if (msg.msg_flags & MSG_NOTIFICATION) {
		msg.msg_control = incmsg;
		msg.msg_controllen = sizeof(incmsg);

		process_sctp_notification(con, &msg,
				page_address(con->rx_page) + con->cb.base);
		mutex_unlock(&con->sock_mutex);
		return 0;
	}
	BUG_ON(con->nodeid == 0);

	if (ret == len)
		call_again_soon = 1;
	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_CACHE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, "
			  "iov_len=%u, iov_base[0]=%p, read=%d",
			  page_address(con->rx_page), con->cb.base, con->cb.len,
			  len, iov[0].iov_base, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	memset(&peeraddr, 0, sizeof(peeraddr));
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &newsock);
	if (result < 0)
		return -ENOMEM;

	mutex_lock_nested(&con->sock_mutex, 0);

	result = -ENOTCONN;
	if (con->sock == NULL)
		goto accept_err;

	newsock->type = con->sock->type;
	newsock->ops = con->sock->ops;

	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
		log_print("connect from non cluster node");
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
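	/*
	 * Sketch of the crossed-connect case the comment above describes
	 * (a description of the existing logic, not new behaviour):
	 *
	 *	node A				node B
	 *	connect() to B   ----\ /----	connect() to A
	 *	accept() from B  <---X--->	accept() from A
	 *
	 * Each side ends up with an outgoing socket on newcon->sock and an
	 * incoming one, which is parked on newcon->othercon so neither is
	 * dropped.  A third simultaneous connection is refused below with
	 * -EAGAIN.
	 */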
	newcon = nodeid2con(nodeid, GFP_KERNEL);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		if (!othercon->sock) {
			newcon->othercon = othercon;
			othercon->sock = newsock;
			newsock->sk->sk_user_data = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
		}
		else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	}
	else {
		newsock->sk->sk_user_data = newcon;
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}
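/*
 * SCTP association lifecycle, for orientation (all names below are the
 * real functions in this file):
 *
 *	sctp_init_assoc()	first sendmsg() on the listening socket
 *				implicitly triggers an SCTP INIT
 *	SCTP_COMM_UP		notification arrives on the listening
 *				socket; process_sctp_notification() maps
 *				the peer address to a nodeid, then uses
 *				SCTP_SOCKOPT_PEELOFF to get a per-node
 *				socket
 *	SCTP_CANT_STR_ASSOC	INIT failed; sctp_init_failed() re-queues
 *				all pending nodes
 *	SCTP_COMM_LOST /	the stored association id is cleared so a
 *	SCTP_SHUTDOWN_COMP	new INIT can be attempted
 */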
/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_init_assoc(struct connection *con)
{
	struct sockaddr_storage rem_addr;
	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	struct connection *base_con;
	struct writequeue_entry *e;
	int len, offset;
	int ret;
	int addrlen;
	struct kvec iov[1];

	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
		return;

	if (con->retries++ > MAX_CONNECT_RETRIES)
		return;

	log_print("Initiating association with node %d", con->nodeid);

	if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
		log_print("no address for nodeid %d", con->nodeid);
		return;
	}
	base_con = nodeid2con(0, 0);
	BUG_ON(base_con == NULL);

	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);

	outmessage.msg_name = &rem_addr;
	outmessage.msg_namelen = addrlen;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.next, struct writequeue_entry,
		       list);

	BUG_ON((struct list_head *) e == &con->writequeue);

	len = e->len;
	offset = e->offset;
	spin_unlock(&con->writequeue_lock);
	kmap(e->page);

	/* Send the first block off the write queue */
	iov[0].iov_base = page_address(e->page) + offset;
	iov[0].iov_len = len;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid());
	outmessage.msg_controllen = cmsg->cmsg_len;

	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
	if (ret < 0) {
		log_print("Send first packet to node %d failed: %d",
			  con->nodeid, ret);

		/* Try again later */
		clear_bit(CF_CONNECT_PENDING, &con->flags);
		clear_bit(CF_INIT_PENDING, &con->flags);
	}
	else {
		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
		}
		spin_unlock(&con->writequeue_lock);
	}
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	int result = -EHOSTUNREACH;
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock) {
		result = 0;
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	if (dlm_nodeid_to_addr(con->nodeid, &saddr)) {
		sock_release(sock);
		goto out_err;
	}

	sock->sk->sk_user_data = con;
	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address when connecting, to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);
	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
	    result != -ENETDOWN && result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		lowcomms_connect_sock(con);
		result = 0;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}

	sock->sk->sk_user_data = con;
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	con->sock = sock;

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmalloc(sizeof(*addr), GFP_KERNEL);
		if (!addr)
			break;
		memcpy(addr, &sas, sizeof(*addr));
		dlm_local_addr[dlm_local_count++] = addr;
	}
}
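/*
 * Multi-homing sketch (illustrative; the two addresses are made up):
 * init_local() above fills dlm_local_addr[], and sctp_listen_for_all()
 * below then binds each one in turn via add_sctp_bind_addr():
 *
 *	dlm_local_addr[0] = 10.0.0.1	-> kernel_bind()          (num == 1)
 *	dlm_local_addr[1] = 192.168.0.1	-> SCTP_SOCKOPT_BINDX_ADD (num == 2)
 *
 * TCP has no equivalent, which is why tcp_listen_for_all() refuses to
 * run with more than one local address.
 */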
/* Bind to an IP address. SCTP allows multiple addresses so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
			      struct sockaddr_storage *addr,
			      int addr_len, int num)
{
	int result = 0;

	if (num == 1)
		result = kernel_bind(sctp_con->sock,
				     (struct sockaddr *) addr,
				     addr_len);
	else
		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
					   SCTP_SOCKOPT_BINDX_ADD,
					   (char *)addr, addr_len);

	if (result < 0)
		log_print("Can't bind to port %d addr number %d",
			  dlm_config.ci_tcp_port, num);

	return result;
}

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct sockaddr_storage localaddr;
	struct sctp_event_subscribe subscribe;
	int result = -EINVAL, num = 1, i, addr_len;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int bufsize = NEEDED_RMEM;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
				  IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	/* Listen for events */
	memset(&subscribe, 0, sizeof(subscribe));
	subscribe.sctp_data_io_event = 1;
	subscribe.sctp_association_event = 1;
	subscribe.sctp_send_failure_event = 1;
	subscribe.sctp_shutdown_event = 1;
	subscribe.sctp_partial_delivery_event = 1;

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
				   (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
				   (char *)&subscribe, sizeof(subscribe));
	if (result < 0) {
		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
			  result);
		goto create_delsock;
	}

	/* Init con struct */
	sock->sk->sk_user_data = con;
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = receive_from_sock;
	con->connect_action = sctp_init_assoc;

	/* Bind to all interfaces. */
	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
		if (result)
			goto create_delsock;
		++num;
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	}
	else {
		result = -EADDRINUSE;
	}

	return result;
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;
	int users = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		users = e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		if (users == 0)
			kmap(e->page);
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		users = e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	kunmap(e->page);
	spin_unlock(&con->writequeue_lock);

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
		queue_work(send_workqueue, &con->swork);
	}
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}
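/*
 * Write-path usage sketch (illustrative caller; the real callers live
 * elsewhere in the DLM, and 'message'/'len' here are made up):
 *
 *	char *p;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_KERNEL, &p);
 *	if (!mh)
 *		return -ENOMEM;
 *	memcpy(p, message, len);	// fill the reserved space
 *	dlm_lowcomms_commit_buffer(mh);	// hand it to the send worker
 *
 * get_buffer reserves 'len' bytes in the last page of the node's
 * writequeue (or adds a new page), and commit_buffer, once the last
 * user of the page has committed, lets send_to_sock() below push the
 * page out with sendpage().
 */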
/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	sendpage = con->sock->ops->sendpage;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);
		kmap(e->page);

		ret = 0;
		if (len) {
			ret = sendpage(con->sock, e->page, offset, len,
				       msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				cond_resched();
				goto out;
			}
			if (ret <= 0)
				goto send_error;
		}
		/* Don't starve people filling buffers */
		cond_resched();

		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
			continue;
		}
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false);
	lowcomms_connect_sock(con);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	if (!test_bit(CF_INIT_PENDING, &con->flags))
		lowcomms_connect_sock(con);
	return;
}

static void clean_one_writequeue(struct connection *con)
{
	struct list_head *list;
	struct list_head *temp;

	spin_lock(&con->writequeue_lock);
	list_for_each_safe(list, temp, &con->writequeue) {
		struct writequeue_entry *e =
			list_entry(list, struct writequeue_entry, list);
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		clean_one_writequeue(con);
		close_connection(con, true);
	}
	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
		con->connect_action(con);
	}
	clear_bit(CF_WRITE_PENDING, &con->flags);
	send_to_sock(con);
}

/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	int nodeid;

	for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
		struct connection *con = __nodeid2con(nodeid, 0);

		if (con)
			clean_one_writequeue(con);
	}
}

static void work_stop(void)
{
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	recv_workqueue = create_workqueue("dlm_recv");
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = create_singlethread_workqueue("dlm_send");
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

void dlm_lowcomms_stop(void)
{
	int i;
	struct connection *con;

	/* Set all the flags to prevent any
	   socket activity.
	*/
	mutex_lock(&connections_lock);
	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con) {
			con->flags |= 0x0F;
			if (con->sock)
				con->sock->sk->sk_user_data = NULL;
		}
	}
	mutex_unlock(&connections_lock);

	work_stop();

	mutex_lock(&connections_lock);
	clean_writequeues();

	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con) {
			close_connection(con, true);
			if (con->othercon)
				kmem_cache_free(con_cache, con->othercon);
			kmem_cache_free(con_cache, con);
		}
	}
	max_nodeid = 0;
	mutex_unlock(&connections_lock);
	kmem_cache_destroy(con_cache);
	idr_init(&connections_idr);
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto out;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto out;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	error = work_start();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false);
		kmem_cache_free(con_cache, con);
	}
	kmem_cache_destroy(con_cache);

out:
	return error;
}