/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
        conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (rds_ib_sysctl_flow_control && credits != 0) {
                /* We're doing flow control */
                ic->i_flowctl = 1;
                rds_ib_send_add_credits(conn, credits);
        } else {
                ic->i_flowctl = 0;
        }
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 above (which is the
 * smallest infinite number :-).
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
        int ret;

        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
        if (ret)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
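
/*
 * Note: going by the naming convention of enum ib_rnr_timeout in
 * <rdma/ib_verbs.h> (the digits read as milliseconds, so
 * IB_RNR_TIMER_000_32 is roughly 0.32 ms), this selects a short RNR NAK
 * delay - long enough to avoid hammering a briefly stalled receiver,
 * short enough to surface credit accounting bugs quickly, as the comment
 * above describes.
 */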

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = NULL;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_qp_attr qp_attr;
        int err;

        if (event->param.conn.private_data_len >= sizeof(*dp)) {
                dp = event->param.conn.private_data;

                /* make sure it isn't empty data */
                if (dp->dp_protocol_major) {
                        rds_ib_set_protocol(conn,
                                RDS_PROTOCOL(dp->dp_protocol_major,
                                             dp->dp_protocol_minor));
                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
                }
        }

        if (conn->c_version < RDS_PROTOCOL(3, 1)) {
                pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
                          &conn->c_laddr, &conn->c_faddr,
                          RDS_PROTOCOL_MAJOR(conn->c_version),
                          RDS_PROTOCOL_MINOR(conn->c_version));
                rds_conn_destroy(conn);
                return;
        } else {
                pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
                          ic->i_active_side ? "Active" : "Passive",
                          &conn->c_laddr, &conn->c_faddr,
                          RDS_PROTOCOL_MAJOR(conn->c_version),
                          RDS_PROTOCOL_MINOR(conn->c_version),
                          ic->i_flowctl ? ", flow control" : "");
        }

        atomic_set(&ic->i_cq_quiesce, 0);

        /* Init rings and fill recv. This needs to wait until protocol
         * negotiation is complete, since ring layout is different
         * from 3.1 to 4.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
        /* Post receive buffers - as a side effect, this will update
         * the posted credit count. */
        rds_ib_recv_refill(conn, 1, GFP_KERNEL);

        /* Tune RNR behavior */
        rds_ib_tune_rnr(ic, &qp_attr);

        qp_attr.qp_state = IB_QPS_RTS;
        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
        if (err)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

        /* update ib_device with this local ipaddr */
        err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
        if (err)
                printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp) {
                /* The start of the dp structure is not guaranteed to be
                 * 8-byte aligned. Since dp_ack_seq is 64-bit, extended load
                 * operations could be used, so go through get_unaligned to
                 * avoid unaligned-access faults.
                 */
                __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

                if (dp_ack_seq)
                        rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
                                            NULL);
        }

        rds_connect_complete(conn);
}
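
/*
 * A note on version packing (a sketch, assuming the RDS_PROTOCOL() macros
 * in rds.h pack the major version in the high byte and the minor in the
 * low byte): RDS_PROTOCOL(4, 1) == 0x0401, RDS_PROTOCOL_MAJOR(0x0401) == 4
 * and RDS_PROTOCOL_MINOR(0x0401) == 1, which is why the plain "<"
 * comparison against RDS_PROTOCOL(3, 1) above orders versions correctly.
 */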

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                                      struct rdma_conn_param *conn_param,
                                      struct rds_ib_connect_private *dp,
                                      u32 protocol_version,
                                      u32 max_responder_resources,
                                      u32 max_initiator_depth)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

        memset(conn_param, 0, sizeof(struct rdma_conn_param));

        conn_param->responder_resources =
                min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
        conn_param->initiator_depth =
                min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
        conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
        conn_param->rnr_retry_count = 7;

        if (dp) {
                memset(dp, 0, sizeof(*dp));
                dp->dp_saddr = conn->c_laddr;
                dp->dp_daddr = conn->c_faddr;
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));

                /* Advertise flow control */
                if (ic->i_flowctl) {
                        unsigned int credits;

                        credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
                        dp->dp_credit = cpu_to_be32(credits);
                        atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
                }

                conn_param->private_data = dp;
                conn_param->private_data_len = sizeof(*dp);
        }
}
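
/*
 * A note on the credit arithmetic above (an assumption based on the
 * IB_GET_POST_CREDITS()/IB_SET_POST_CREDITS() helpers in ib.h):
 * ic->i_credits packs two 16-bit counters into one atomic_t - send
 * credits in the low half, posted-buffer credits in the high half. The
 * atomic_sub() therefore subtracts only from the high half, retiring the
 * posted credits we just advertised to the peer in a single atomic step.
 */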

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
        rdsdebug("event %u (%s) data %p\n",
                 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_evt_handler_call);

        tasklet_schedule(&ic->i_recv_tasklet);
}

static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
                     struct ib_wc *wcs)
{
        int nr, i;
        struct ib_wc *wc;

        while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
                for (i = 0; i < nr; i++) {
                        wc = wcs + i;
                        rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                                 (unsigned long long)wc->wr_id, wc->status,
                                 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

                        if (wc->wr_id <= ic->i_send_ring.w_nr ||
                            wc->wr_id == RDS_IB_ACK_WR_ID)
                                rds_ib_send_cqe_handler(ic, wc);
                        else
                                rds_ib_mr_cqe_handler(ic, wc);
                }
        }
}
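
/*
 * Both tasklets below use the same poll -> re-arm -> poll pattern: drain
 * the CQ, call ib_req_notify_cq() to re-arm the completion interrupt,
 * then drain once more. The second poll catches completions that slipped
 * in between the first drain and the re-arm; without it those entries
 * could sit in the CQ with no further event to flush them out.
 */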
static void rds_ib_tasklet_fn_send(unsigned long data)
{
        struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
        struct rds_connection *conn = ic->conn;

        rds_ib_stats_inc(s_ib_tasklet_call);

        /* if cq has been already reaped, ignore incoming cq event */
        if (atomic_read(&ic->i_cq_quiesce))
                return;

        poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
        ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

        if (rds_conn_up(conn) &&
            (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
             test_bit(0, &conn->c_map_queued)))
                rds_send_xmit(&ic->conn->c_path[0]);
}

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
                     struct ib_wc *wcs,
                     struct rds_ib_ack_state *ack_state)
{
        int nr, i;
        struct ib_wc *wc;

        while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
                for (i = 0; i < nr; i++) {
                        wc = wcs + i;
                        rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                                 (unsigned long long)wc->wr_id, wc->status,
                                 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

                        rds_ib_recv_cqe_handler(ic, wc, ack_state);
                }
        }
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
        struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
        struct rds_ib_ack_state state;

        if (!rds_ibdev)
                rds_conn_drop(conn);

        rds_ib_stats_inc(s_ib_tasklet_call);

        /* if cq has been already reaped, ignore incoming cq event */
        if (atomic_read(&ic->i_cq_quiesce))
                return;

        memset(&state, 0, sizeof(state));
        poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
        ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

        if (state.ack_next_valid)
                rds_ib_set_ack(ic, state.ack_next, state.ack_required);
        if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
                rds_send_drop_acked(conn, state.ack_recv, NULL);
                ic->i_ack_recv = state.ack_recv;
        }

        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
        struct rds_connection *conn = data;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
                 ib_event_msg(event->event));

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                rdsdebug("Fatal QP Event %u (%s) - connection %pI4->%pI4, reconnecting\n",
                         event->event, ib_event_msg(event->event),
                         &conn->c_laddr, &conn->c_faddr);
                rds_conn_drop(conn);
                break;
        }
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_evt_handler_call);

        tasklet_schedule(&ic->i_send_tasklet);
}

static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
        int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
        int index = rds_ibdev->dev->num_comp_vectors - 1;
        int i;

        for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
                if (rds_ibdev->vector_load[i] < min) {
                        index = i;
                        min = rds_ibdev->vector_load[i];
                }
        }

        rds_ibdev->vector_load[index]++;
        return index;
}

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
        rds_ibdev->vector_load[index]--;
}
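
/*
 * The two helpers above implement a simple least-loaded spread of CQs
 * across the device's completion vectors (i.e. across the interrupt
 * contexts the HCA can signal on), so that CQ processing for different
 * connections doesn't all pile up on one CPU. vector_load is read and
 * updated without locking here, so the choice is best-effort; callers
 * appear to tolerate the occasional imperfect pick this can produce.
 */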

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct ib_qp_init_attr attr;
        struct ib_cq_init_attr cq_attr = {};
        struct rds_ib_device *rds_ibdev;
        int ret, fr_queue_space;

        /*
         * It's normal to see a null device if an incoming connection races
         * with device removal, so we don't print a warning.
         */
        rds_ibdev = rds_ib_get_client_data(dev);
        if (!rds_ibdev)
                return -EOPNOTSUPP;

        /* fr_queue_space reserves extra space on the completion queue and
         * send queue beyond what the rings need. This extra space is used
         * for FRMR registration and invalidation work requests
         * (RDS_IB_DEFAULT_FR_WR + RDS_IB_DEFAULT_FR_INV_WR, plus one slack
         * entry for each).
         */
        fr_queue_space = rds_ibdev->use_fastreg ?
                         (RDS_IB_DEFAULT_FR_WR + 1) +
                         (RDS_IB_DEFAULT_FR_INV_WR + 1)
                         : 0;

        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);

        if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;

        ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
        cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
        cq_attr.comp_vector = ic->i_scq_vector;
        ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
                                     rds_ib_cq_event_handler, conn,
                                     &cq_attr);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto rds_ibdev_out;
        }

        ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
        cq_attr.cqe = ic->i_recv_ring.w_nr;
        cq_attr.comp_vector = ic->i_rcq_vector;
        ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
                                     rds_ib_cq_event_handler, conn,
                                     &cq_attr);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto send_cq_out;
        }

        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
                goto recv_cq_out;
        }

        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
                goto recv_cq_out;
        }
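
        /*
         * The QP below is created with IB_SIGNAL_REQ_WR, i.e. per-WR
         * signaling: only work requests posted with IB_SEND_SIGNALED
         * generate success completions. That keeps send-CQ traffic down,
         * but it is also why shutdown waits on ic->i_signaled_sends rather
         * than on the whole send ring - a ring holding only unsignaled
         * sends would never drain via completions.
         */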
        /* XXX negotiate max send/recv with remote? */
        memset(&attr, 0, sizeof(attr));
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
        attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;
        atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
        atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);

        /*
         * XXX this can fail if max_*_wr is too large?  Are we supposed
         * to back off until we get a value that the hardware can support?
         */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
                goto recv_cq_out;
        }

        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_send_ring.w_nr *
                                                sizeof(struct rds_header),
                                                &ic->i_send_hdrs_dma, GFP_KERNEL);
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
                goto qp_out;
        }

        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_recv_ring.w_nr *
                                                sizeof(struct rds_header),
                                                &ic->i_recv_hdrs_dma, GFP_KERNEL);
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
                goto send_hdrs_dma_out;
        }

        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
                                          &ic->i_ack_dma, GFP_KERNEL);
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
                goto recv_hdrs_dma_out;
        }

        ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
                                   ibdev_to_node(dev));
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
                goto ack_dma_out;
        }

        ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
                                   ibdev_to_node(dev));
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
                goto sends_out;
        }

        rds_ib_recv_init_ack(ic);

        rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
                 ic->i_send_cq, ic->i_recv_cq);

        return ret;

sends_out:
        vfree(ic->i_sends);
ack_dma_out:
        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                             ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
        ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
                             sizeof(struct rds_header),
                             ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
        ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
                             sizeof(struct rds_header),
                             ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
        rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
        if (!ib_destroy_cq(ic->i_recv_cq))
                ic->i_recv_cq = NULL;
send_cq_out:
        if (!ib_destroy_cq(ic->i_send_cq))
                ic->i_send_cq = NULL;
rds_ibdev_out:
        rds_ib_remove_conn(rds_ibdev, conn);
        rds_ib_dev_put(rds_ibdev);

        return ret;
}
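
/*
 * Note the unwind structure above: the error labels tear resources down
 * in exactly the reverse order of their setup, and each failure site
 * jumps to the label that frees everything allocated *before* it - the
 * "very careful to not leave IS_ERR pointers around" discipline the
 * function comment asks for. New allocations should slot into both the
 * setup sequence and the unwind chain.
 */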
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        u16 common;
        u32 version = 0;

        /*
         * rdma_cm private data is odd - when there is any private data in the
         * request, we will be given a pretty large buffer without telling us the
         * original size. The only way to tell the difference is by looking at
         * the contents, which are initialized to zero.
         * If the protocol version fields aren't set, this is a connection attempt
         * from an older version. This could be 3.0 or 2.0 - we can't tell.
         * We really should have changed this for OFED 1.3 :-(
         */

        /* Be paranoid. RDS always has privdata */
        if (!event->param.conn.private_data_len) {
                printk(KERN_NOTICE "RDS incoming connection has no private data, rejecting\n");
                return 0;
        }

        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < sizeof(*dp) ||
            dp->dp_protocol_major == 0)
                return RDS_PROTOCOL_3_0;

        common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
        if (dp->dp_protocol_major == 3 && common) {
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
        } else
                printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
                                   &dp->dp_saddr,
                                   dp->dp_protocol_major,
                                   dp->dp_protocol_minor);
        return version;
}
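
/*
 * A worked example of the mask negotiation above (assuming, as the use of
 * RDS_PROTOCOL_3_0 as the base suggests, that the minor-version mask has
 * bit n set for supported minor version n): if the peer advertises a
 * minor mask of 0x0007 (minors 0-2) and we support 0x0003 (minors 0-1),
 * then common == 0x0003. The loop shifts common right until it is zero,
 * bumping the version once per shift, so we end at 3.1 - the highest
 * minor version both sides support.
 */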
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event)
{
        __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
        __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        struct rds_ib_connect_private dp_rep;
        struct rds_connection *conn = NULL;
        struct rds_ib_connection *ic = NULL;
        struct rdma_conn_param conn_param;
        u32 version;
        int err = 1, destroy = 1;

        /* Check whether the remote protocol version matches ours. */
        version = rds_ib_protocol_compatible(event);
        if (!version)
                goto out;

        rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid 0x%llx\n",
                 &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));

        /* RDS/IB is not currently netns aware, thus init_net */
        conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
                               &rds_ib_transport, GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
                goto out;
        }

        /*
         * The connection request may occur while the previous connection
         * still exists, e.g. in case of failover. But as connections may
         * be initiated simultaneously by both hosts, we have a random
         * backoff mechanism - see the comment above rds_queue_reconnect().
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP) {
                        rdsdebug("incoming connect while connecting\n");
                        rds_conn_drop(conn);
                        rds_ib_stats_inc(s_ib_listen_closed_stale);
                } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
                        /* Wait and see - our connect may still be succeeding */
                        rds_ib_stats_inc(s_ib_connect_raced);
                }
                goto out;
        }

        ic = conn->c_transport_data;

        rds_ib_set_protocol(conn, version);
        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        BUG_ON(cm_id->context);
        BUG_ON(ic->i_cm_id);

        ic->i_cm_id = cm_id;
        cm_id->context = conn;

        /* We got halfway through setting up the ib_connection, if we
         * fail now, we have to take the long route out of this mess. */
        destroy = 0;

        err = rds_ib_setup_qp(conn);
        if (err) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
                                  event->param.conn.responder_resources,
                                  event->param.conn.initiator_depth);

        /* rdma_accept() calls rdma_reject() internally if it fails */
        if (rdma_accept(cm_id, &conn_param))
                rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
        if (conn)
                mutex_unlock(&conn->c_cm_lock);
        if (err)
                rdma_reject(cm_id, NULL, 0);
        return destroy;
}


int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
        struct rds_connection *conn = cm_id->context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rdma_conn_param conn_param;
        struct rds_ib_connect_private dp;
        int ret;

        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
        rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */

        ret = rds_ib_setup_qp(conn);
        if (ret) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
                                  UINT_MAX, UINT_MAX);
        ret = rdma_connect(cm_id, &conn_param);
        if (ret)
                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
        /* Beware - returning non-zero tells the rdma_cm to destroy
         * the cm_id. We should certainly not do it as long as we still
         * "own" the cm_id. */
        if (ret) {
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
        ic->i_active_side = true;
        return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct sockaddr_in src, dest;
        int ret;

        /* XXX I wonder what effect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
                                     RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
                rdsdebug("rdma_create_id() failed: %d\n", ret);
                goto out;
        }

        rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

        src.sin_family = AF_INET;
        src.sin_addr.s_addr = (__force u32)conn->c_laddr;
        src.sin_port = (__force u16)htons(0);

        dest.sin_family = AF_INET;
        dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
        dest.sin_port = (__force u16)htons(RDS_PORT);

        ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
                                (struct sockaddr *)&dest,
                                RDS_RDMA_RESOLVE_TIMEOUT_MS);
        if (ret) {
                rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
                         ret);
                rdma_destroy_id(ic->i_cm_id);
                ic->i_cm_id = NULL;
        }

out:
        return ret;
}
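
/*
 * Active-side connect flow, for orientation (a sketch pieced together
 * from this file; the intermediate CM events are dispatched elsewhere,
 * by rds_rdma_cm_event_handler as noted above):
 * rds_ib_conn_path_connect() creates the cm_id and starts address
 * resolution; once address and route resolution complete,
 * rds_ib_cm_initiate_connect() builds the QP and calls rdma_connect();
 * the ESTABLISHED event finally lands in rds_ib_cm_connect_complete()
 * above.
 */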

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;
        int err = 0;

        rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
                 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
                 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

        if (ic->i_cm_id) {
                struct ib_device *dev = ic->i_cm_id->device;

                rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
                err = rdma_disconnect(ic->i_cm_id);
                if (err) {
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
                        rdsdebug("failed to disconnect, cm: %p err %d\n",
                                 ic->i_cm_id, err);
                }

                /*
                 * We want to wait for tx and rx completion to finish
                 * before we tear down the connection, but we have to be
                 * careful not to get stuck waiting on a send ring that
                 * only has unsignaled sends in it.  We've shut down new
                 * sends before getting here so by waiting for signaled
                 * sends to complete we're ensured that there will be no
                 * more tx processing.
                 */
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_recv_ring) &&
                           (atomic_read(&ic->i_signaled_sends) == 0) &&
                           (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
                           (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
                tasklet_kill(&ic->i_send_tasklet);
                tasklet_kill(&ic->i_recv_tasklet);

                atomic_set(&ic->i_cq_quiesce, 1);

                /* first destroy the ib state that generates callbacks */
                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
                if (ic->i_send_cq) {
                        if (ic->rds_ibdev)
                                ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
                        ib_destroy_cq(ic->i_send_cq);
                }

                if (ic->i_recv_cq) {
                        if (ic->rds_ibdev)
                                ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
                        ib_destroy_cq(ic->i_recv_cq);
                }

                /* then free the resources that ib callbacks use */
                if (ic->i_send_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_send_ring.w_nr *
                                             sizeof(struct rds_header),
                                             ic->i_send_hdrs,
                                             ic->i_send_hdrs_dma);

                if (ic->i_recv_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_recv_ring.w_nr *
                                             sizeof(struct rds_header),
                                             ic->i_recv_hdrs,
                                             ic->i_recv_hdrs_dma);

                if (ic->i_ack)
                        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                                             ic->i_ack, ic->i_ack_dma);

                if (ic->i_sends)
                        rds_ib_send_clear_ring(ic);
                if (ic->i_recvs)
                        rds_ib_recv_clear_ring(ic);

                rdma_destroy_id(ic->i_cm_id);
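
                /*
                 * A connection sits on either its device's list or on the
                 * ib_nodev_conns list (see rds_ib_conn_free() below).
                 * Detaching from the device here presumably lets a later
                 * reconnect re-bind the conn to whatever device the new
                 * cm_id resolves to.
                 */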
                /* Move connection back to the nodev list. */
                if (ic->rds_ibdev)
                        rds_ib_remove_conn(ic->rds_ibdev, conn);

                ic->i_cm_id = NULL;
                ic->i_pd = NULL;
                ic->i_send_cq = NULL;
                ic->i_recv_cq = NULL;
                ic->i_send_hdrs = NULL;
                ic->i_recv_hdrs = NULL;
                ic->i_ack = NULL;
        }
        BUG_ON(ic->rds_ibdev);

        /* Clear pending transmit */
        if (ic->i_data_op) {
                struct rds_message *rm;

                rm = container_of(ic->i_data_op, struct rds_message, data);
                rds_message_put(rm);
                ic->i_data_op = NULL;
        }

        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_set(&ic->i_ack_next, 0);
#else
        ic->i_ack_next = 0;
#endif
        ic->i_ack_recv = 0;

        /* Clear flow control state */
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);

        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
                ic->i_ibinc = NULL;
        }

        vfree(ic->i_sends);
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
        ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_ib_connection *ic;
        unsigned long flags;
        int ret;

        /* XXX too lazy? */
        ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
        if (!ic)
                return -ENOMEM;

        ret = rds_ib_recv_alloc_caches(ic);
        if (ret) {
                kfree(ic);
                return ret;
        }

        INIT_LIST_HEAD(&ic->ib_node);
        tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
                     (unsigned long)ic);
        tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
                     (unsigned long)ic);
        mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&ic->i_ack_lock);
#endif
        atomic_set(&ic->i_signaled_sends, 0);

        /*
         * rds_ib_conn_path_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        ic->conn = conn;
        conn->c_transport_data = ic;

        spin_lock_irqsave(&ib_nodev_conns_lock, flags);
        list_add_tail(&ic->ib_node, &ib_nodev_conns);
        spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

        rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
        return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
        struct rds_ib_connection *ic = arg;
        spinlock_t *lock_ptr;

        rdsdebug("ic %p\n", ic);

        /*
         * Conn is either on a dev's list or on the nodev list.
         * A race with shutdown() or connect() would cause problems
         * (since rds_ibdev would change) but that should never happen.
         */
        lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

        spin_lock_irq(lock_ptr);
        list_del(&ic->ib_node);
        spin_unlock_irq(lock_ptr);

        rds_ib_recv_free_caches(ic);

        kfree(ic);
}


/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
        va_list ap;

        rds_conn_drop(conn);

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);
}