/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
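
/*
 * Illustrative lifecycle sketch (not code from this file; the caller and
 * its error handling are assumptions): a user of this layer starts it once
 * the cluster configuration is in place, exchanges messages through
 * dlm_lowcomms_get_buffer()/dlm_lowcomms_commit_buffer() below, and tears
 * everything down on exit.
 *
 *	int example_bringup(void)
 *	{
 *		int error;
 *
 *		error = dlm_lowcomms_start();	(listen socket + workqueues)
 *		if (error)
 *			return error;
 *		...exchange messages via the buffer API...
 *		dlm_lowcomms_stop();		(flush queues, free connections)
 *		return 0;
 *	}
 */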

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
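
/*
 * Worked example for the ring buffer above (numbers are illustrative):
 * with cbuf_init(cb, 4096) the mask is 0xfff, so positions wrap modulo the
 * page size. If base == 4000 and len == 200, the data occupies bytes
 * 4000..4095 and then wraps to 0..103; cbuf_data() returns
 * (4000 + 200) & 0xfff == 104, the next free byte. cbuf_eat(cb, 200) then
 * advances base to (4200 & 0xfff) == 104 and leaves the buffer empty.
 * 'size' must be a power of two for the mask arithmetic to work.
 */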

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
	struct list_head writequeue;	/* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	int sctp_assoc;
	struct connection *othercon;
	struct work_struct rwork;	/* Receive workqueue */
	struct work_struct swork;	/* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static DEFINE_IDR(connections_idr);
static DECLARE_MUTEX(connections_lock);
static int max_nodeid;
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);

/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;
	int n;

	con = idr_find(&connections_idr, nodeid);
	if (con || !alloc)
		return con;

	r = idr_pre_get(&connections_idr, alloc);
	if (!r)
		return NULL;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = idr_get_new_above(&connections_idr, con, nodeid, &n);
	if (r) {
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	if (n != nodeid) {
		idr_remove(&connections_idr, n);
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = idr_find(&connections_idr, 0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	if (nodeid > max_nodeid)
		max_nodeid = nodeid;

	return con;
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	down(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	up(&connections_lock);

	return con;
}

/* This is a bit drastic, but only called when things go wrong */
static struct connection *assoc2con(int assoc_id)
{
	int i;
	struct connection *con;

	down(&connections_lock);
	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con && con->sctp_assoc == assoc_id) {
			up(&connections_lock);
			return con;
		}
	}
	up(&connections_lock);
	return NULL;
}

static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
{
	struct sockaddr_storage addr;
	int error;

	if (!dlm_local_count)
		return -1;

	error = dlm_nodeid_to_addr(nodeid, &addr);
	if (error)
		return error;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
		memcpy(&ret6->sin6_addr, &in6->sin6_addr,
		       sizeof(in6->sin6_addr));
	}

	return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		lowcomms_write_space(sk);
}
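
/*
 * Note on the CF_* bits above (a sketch of the existing pattern, nothing
 * new): each "pending" bit is claimed with test_and_set_bit() before work
 * is queued, so however often a socket callback fires while an item is
 * already queued, only one work item results:
 *
 *	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
 *		queue_work(recv_workqueue, &con->rwork);
 *
 * The worker (process_recv_sockets() below) clears the bit before calling
 * rx_action, so a wakeup arriving mid-read queues a fresh pass.
 */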

/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
	con->sock = sock;

	/* Install a data_ready callback */
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->sock->sk->sk_write_space = lowcomms_write_space;
	con->sock->sk->sk_state_change = lowcomms_state_change;
	con->sock->sk->sk_user_data = con;
	return 0;
}

/* Add the port number to an IPv4 or IPv6 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0,
	       sizeof(struct sockaddr_storage) - *addr_len);
}
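
/*
 * Example of make_sockaddr() above (illustrative values; 21064 merely
 * stands in for whatever dlm_config.ci_tcp_port holds on a given cluster):
 *
 *	struct sockaddr_storage addr;	(filled in from cluster config)
 *	int addr_len;
 *
 *	make_sockaddr(&addr, 21064, &addr_len);
 *
 * For an AF_INET local address this stores cpu_to_be16(21064) in sin_port
 * and sets addr_len to sizeof(struct sockaddr_in). Zeroing the tail of the
 * sockaddr_storage lets addresses be compared as flat memory without stale
 * bytes getting in the way.
 */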

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);

	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
}

/* We only send shutdown messages to nodes that are not part of the cluster */
static void sctp_send_shutdown(sctp_assoc_t associd)
{
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;
	struct connection *con;

	con = nodeid2con(0, 0);
	BUG_ON(con == NULL);

	outmessage.msg_name = NULL;
	outmessage.msg_namelen = 0;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outmessage.msg_controllen = cmsg->cmsg_len;
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

	sinfo->sinfo_flags |= MSG_EOF;
	sinfo->sinfo_assoc_id = associd;

	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);

	if (ret != 0)
		log_print("send EOF to node failed: %d", ret);
}

/* INIT failed but we don't know which node...
   restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
	int i;
	struct connection *con;

	down(&connections_lock);
	for (i = 1; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (!con)
			continue;
		con->sctp_assoc = 0;
		if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
			if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
				queue_work(send_workqueue, &con->swork);
			}
		}
	}
	up(&connections_lock);
}

/* Something happened to an association */
static void process_sctp_notification(struct connection *con,
				      struct msghdr *msg, char *buf)
{
	union sctp_notification *sn = (union sctp_notification *)buf;

	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
		switch (sn->sn_assoc_change.sac_state) {

		case SCTP_COMM_UP:
		case SCTP_RESTART:
		{
			/* Check that the new node is in the lockspace */
			struct sctp_prim prim;
			int nodeid;
			int prim_len, ret;
			int addr_len;
			struct connection *new_con;
			struct file *file;
			sctp_peeloff_arg_t parg;
			int parglen = sizeof(parg);

			/*
			 * We get this before any data for an association.
			 * We verify that the node is in the cluster and
			 * then peel off a socket for it.
			 */
			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
				log_print("COMM_UP for invalid assoc ID %d",
					  (int)sn->sn_assoc_change.sac_assoc_id);
				sctp_init_failed();
				return;
			}
			memset(&prim, 0, sizeof(struct sctp_prim));
			prim_len = sizeof(struct sctp_prim);
			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

			ret = kernel_getsockopt(con->sock,
						IPPROTO_SCTP,
						SCTP_PRIMARY_ADDR,
						(char *)&prim,
						&prim_len);
			if (ret < 0) {
				log_print("getsockopt/sctp_primary_addr on "
					  "new assoc %d failed : %d",
					  (int)sn->sn_assoc_change.sac_assoc_id,
					  ret);

				/* Retry INIT later */
				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
				if (new_con)
					clear_bit(CF_CONNECT_PENDING, &con->flags);
				return;
			}
			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
			if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
				int i;
				unsigned char *b = (unsigned char *)&prim.ssp_addr;

				log_print("reject connect from unknown addr");
				for (i = 0; i < sizeof(struct sockaddr_storage); i++)
					printk("%02x ", b[i]);
				printk("\n");
				sctp_send_shutdown(prim.ssp_assoc_id);
				return;
			}

			new_con = nodeid2con(nodeid, GFP_KERNEL);
			if (!new_con)
				return;

			/* Peel off a new sock */
			parg.associd = sn->sn_assoc_change.sac_assoc_id;
			ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
						SCTP_SOCKOPT_PEELOFF,
						(void *)&parg, &parglen);
			if (ret) {
				log_print("Can't peel off a socket for "
					  "connection %d to node %d: err=%d",
					  parg.associd, nodeid, ret);
			}
			file = fget(parg.sd);
			new_con->sock = SOCKET_I(file->f_dentry->d_inode);
			add_sock(new_con->sock, new_con);
			fput(file);
			put_unused_fd(parg.sd);

			log_print("got new/restarted association %d nodeid %d",
				  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);

			/* Send any pending writes */
			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
			clear_bit(CF_INIT_PENDING, &con->flags);
			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
				queue_work(send_workqueue, &new_con->swork);
			}
			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
				queue_work(recv_workqueue, &new_con->rwork);
		}
		break;

		case SCTP_COMM_LOST:
		case SCTP_SHUTDOWN_COMP:
		{
			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
			if (con) {
				con->sctp_assoc = 0;
			}
		}
		break;

		/* We don't know which INIT failed, so clear the PENDING flags
		 * on them all. If assoc_id is zero then it will try
		 * again */

		case SCTP_CANT_STR_ASSOC:
		{
			log_print("Can't start SCTP association - retrying");
			sctp_init_failed();
		}
		break;

		default:
			log_print("unexpected SCTP assoc change id=%d state=%d",
				  (int)sn->sn_assoc_change.sac_assoc_id,
				  sn->sn_assoc_change.sac_state);
		}
	}
}
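
/*
 * Worked example for the two-iovec receive below (numbers illustrative,
 * assuming PAGE_CACHE_SIZE == 4096): with cb.base == 1000 and cb.len == 600,
 * cbuf_data() == 1600, so iov[0] spans bytes 1600..4095 (the tail of the
 * page) and iov[1] spans 0..999 (the free space ahead of the unread data).
 * If the free region has already wrapped (cbuf_data() < cb.base), a single
 * iovec from cbuf_data() up to cb.base suffices and nvec stays 1.
 */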

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;
	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
	}

	/* Only SCTP needs these really */
	memset(&incmsg, 0, sizeof(incmsg));
	msg.msg_control = incmsg;
	msg.msg_controllen = sizeof(incmsg);

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
				 MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;

	/* Process SCTP notifications */
	if (msg.msg_flags & MSG_NOTIFICATION) {
		msg.msg_control = incmsg;
		msg.msg_controllen = sizeof(incmsg);

		process_sctp_notification(con, &msg,
				page_address(con->rx_page) + con->cb.base);
		mutex_unlock(&con->sock_mutex);
		return 0;
	}
	BUG_ON(con->nodeid == 0);

	if (ret == len)
		call_again_soon = 1;
	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_CACHE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, "
			  "iov_len=%u, iov_base[0]=%p, read=%d",
			  page_address(con->rx_page), con->cb.base, con->cb.len,
			  len, iov[0].iov_base, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	memset(&peeraddr, 0, sizeof(peeraddr));
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &newsock);
	if (result < 0)
		return -ENOMEM;

	mutex_lock_nested(&con->sock_mutex, 0);

	result = -ENOTCONN;
	if (con->sock == NULL)
		goto accept_err;

	newsock->type = con->sock->type;
	newsock->ops = con->sock->ops;

	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
		log_print("connect from non cluster node");
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_KERNEL);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		if (!othercon->sock) {
			newcon->othercon = othercon;
			othercon->sock = newsock;
			newsock->sk->sk_user_data = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
		} else {
			printk("Extra connection from node %d attempted\n",
			       nodeid);
			result = -EAGAIN;
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newsock->sk->sk_user_data = newcon;
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_init_assoc(struct connection *con)
{
	struct sockaddr_storage rem_addr;
	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	struct connection *base_con;
	struct writequeue_entry *e;
	int len, offset;
	int ret;
	int addrlen;
	struct kvec iov[1];

	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
		return;

	if (con->retries++ > MAX_CONNECT_RETRIES)
		return;

	log_print("Initiating association with node %d", con->nodeid);

	if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
		log_print("no address for nodeid %d", con->nodeid);
		return;
	}
	base_con = nodeid2con(0, 0);
	BUG_ON(base_con == NULL);

	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);

	outmessage.msg_name = &rem_addr;
	outmessage.msg_namelen = addrlen;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.next, struct writequeue_entry,
		       list);

	BUG_ON((struct list_head *) e == &con->writequeue);

	len = e->len;
	offset = e->offset;
	spin_unlock(&con->writequeue_lock);
	kmap(e->page);

	/* Send the first block off the write queue */
	iov[0].iov_base = page_address(e->page)+offset;
	iov[0].iov_len = len;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid());
	outmessage.msg_controllen = cmsg->cmsg_len;

	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
	if (ret < 0) {
		log_print("Send first packet to node %d failed: %d",
			  con->nodeid, ret);

		/* Try again later */
		clear_bit(CF_CONNECT_PENDING, &con->flags);
		clear_bit(CF_INIT_PENDING, &con->flags);
	} else {
		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
		}
		spin_unlock(&con->writequeue_lock);
	}
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	int result = -EHOSTUNREACH;
	struct sockaddr_storage saddr;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock) {
		result = 0;
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	if (dlm_nodeid_to_addr(con->nodeid, &saddr))
		goto out_err;

	sock->sk->sk_user_data = con;
	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);
	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
	    result != -ENETDOWN && result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		lowcomms_connect_sock(con);
		result = 0;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}
	sock->sk->sk_user_data = con;
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	con->sock = sock;

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmalloc(sizeof(*addr), GFP_KERNEL);
		if (!addr)
			break;
		memcpy(addr, &sas, sizeof(*addr));
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

/* Bind to an IP address. SCTP allows multiple addresses so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
			      struct sockaddr_storage *addr,
			      int addr_len, int num)
{
	int result = 0;

	if (num == 1)
		result = kernel_bind(sctp_con->sock,
				     (struct sockaddr *) addr,
				     addr_len);
	else
		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
					   SCTP_SOCKOPT_BINDX_ADD,
					   (char *)addr, addr_len);

	if (result < 0)
		log_print("Can't bind to port %d addr number %d",
			  dlm_config.ci_tcp_port, num);

	return result;
}

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct sockaddr_storage localaddr;
	struct sctp_event_subscribe subscribe;
	int result = -EINVAL, num = 1, i, addr_len;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int bufsize = NEEDED_RMEM;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
				  IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	/* Listen for events */
	memset(&subscribe, 0, sizeof(subscribe));
	subscribe.sctp_data_io_event = 1;
	subscribe.sctp_association_event = 1;
	subscribe.sctp_send_failure_event = 1;
	subscribe.sctp_shutdown_event = 1;
	subscribe.sctp_partial_delivery_event = 1;

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
				   (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
				   (char *)&subscribe, sizeof(subscribe));
	if (result < 0) {
		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
			  result);
		goto create_delsock;
	}

	/* Init con struct */
	sock->sk->sk_user_data = con;
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = receive_from_sock;
	con->connect_action = sctp_init_assoc;

	/* Bind to all interfaces. */
	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
		if (result)
			goto create_delsock;
		++num;
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	} else {
		result = -EADDRINUSE;
	}

	return result;
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;
	int users = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		users = e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		if (users == 0)
			kmap(e->page);
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		users = e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	kunmap(e->page);
	spin_unlock(&con->writequeue_lock);

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
		queue_work(send_workqueue, &con->swork);
	}
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}
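
/*
 * Usage sketch for the pair above (illustrative; midcomms is the real
 * caller, and 'len' must fit within one page for the queueing logic to
 * accept it). The opaque handle returned is the writequeue_entry itself:
 *
 *	char *p;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_KERNEL, &p);
 *
 *	if (mh) {
 *		...build the message in p...
 *		dlm_lowcomms_commit_buffer(mh);
 *	}
 *
 * get_buffer reserves 'len' bytes at the tail of the newest queued page
 * and takes a reference (e->users++); commit_buffer drops it and, once no
 * writers remain, publishes the bytes to send_to_sock() via e->len.
 */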

/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	sendpage = con->sock->ops->sendpage;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);
		kmap(e->page);

		ret = 0;
		if (len) {
			ret = sendpage(con->sock, e->page, offset, len,
				       msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				cond_resched();
				goto out;
			}
			if (ret <= 0)
				goto send_error;
		}
		/* Don't starve people filling buffers */
		cond_resched();

		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
			continue;
		}
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false);
	lowcomms_connect_sock(con);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	if (!test_bit(CF_INIT_PENDING, &con->flags))
		lowcomms_connect_sock(con);
	return;
}

static void clean_one_writequeue(struct connection *con)
{
	struct list_head *list;
	struct list_head *temp;

	spin_lock(&con->writequeue_lock);
	list_for_each_safe(list, temp, &con->writequeue) {
		struct writequeue_entry *e =
			list_entry(list, struct writequeue_entry, list);
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		clean_one_writequeue(con);
		close_connection(con, true);
	}
	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
		con->connect_action(con);
	}
	clear_bit(CF_WRITE_PENDING, &con->flags);
	send_to_sock(con);
}

/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	int nodeid;

	for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
		struct connection *con = __nodeid2con(nodeid, 0);

		if (con)
			clean_one_writequeue(con);
	}
}

static void work_stop(void)
{
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
}
1378 } 1379 1380 static int work_start(void) 1381 { 1382 int error; 1383 recv_workqueue = create_workqueue("dlm_recv"); 1384 error = IS_ERR(recv_workqueue); 1385 if (error) { 1386 log_print("can't start dlm_recv %d", error); 1387 return error; 1388 } 1389 1390 send_workqueue = create_singlethread_workqueue("dlm_send"); 1391 error = IS_ERR(send_workqueue); 1392 if (error) { 1393 log_print("can't start dlm_send %d", error); 1394 destroy_workqueue(recv_workqueue); 1395 return error; 1396 } 1397 1398 return 0; 1399 } 1400 1401 void dlm_lowcomms_stop(void) 1402 { 1403 int i; 1404 struct connection *con; 1405 1406 /* Set all the flags to prevent any 1407 socket activity. 1408 */ 1409 down(&connections_lock); 1410 for (i = 0; i <= max_nodeid; i++) { 1411 con = __nodeid2con(i, 0); 1412 if (con) { 1413 con->flags |= 0x0F; 1414 if (con->sock) 1415 con->sock->sk->sk_user_data = NULL; 1416 } 1417 } 1418 up(&connections_lock); 1419 1420 work_stop(); 1421 1422 down(&connections_lock); 1423 clean_writequeues(); 1424 1425 for (i = 0; i <= max_nodeid; i++) { 1426 con = __nodeid2con(i, 0); 1427 if (con) { 1428 close_connection(con, true); 1429 kmem_cache_free(con_cache, con); 1430 } 1431 } 1432 max_nodeid = 0; 1433 up(&connections_lock); 1434 kmem_cache_destroy(con_cache); 1435 idr_init(&connections_idr); 1436 } 1437 1438 int dlm_lowcomms_start(void) 1439 { 1440 int error = -EINVAL; 1441 struct connection *con; 1442 1443 init_local(); 1444 if (!dlm_local_count) { 1445 error = -ENOTCONN; 1446 log_print("no local IP address has been set"); 1447 goto out; 1448 } 1449 1450 error = -ENOMEM; 1451 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection), 1452 __alignof__(struct connection), 0, 1453 NULL); 1454 if (!con_cache) 1455 goto out; 1456 1457 /* Set some sysctl minima */ 1458 if (sysctl_rmem_max < NEEDED_RMEM) 1459 sysctl_rmem_max = NEEDED_RMEM; 1460 1461 /* Start listening */ 1462 if (dlm_config.ci_protocol == 0) 1463 error = tcp_listen_for_all(); 1464 else 1465 error = sctp_listen_for_all(); 1466 if (error) 1467 goto fail_unlisten; 1468 1469 error = work_start(); 1470 if (error) 1471 goto fail_unlisten; 1472 1473 return 0; 1474 1475 fail_unlisten: 1476 con = nodeid2con(0,0); 1477 if (con) { 1478 close_connection(con, false); 1479 kmem_cache_free(con_cache, con); 1480 } 1481 kmem_cache_destroy(con_cache); 1482 1483 out: 1484 return error; 1485 } 1486