/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"

static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
static atomic_t rds_ib_unloading;

module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
module_param(rds_ib_mr_8k_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");

/*
 * we have a clumsy combination of RCU and a rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);

static void rds_ib_nodev_connect(void)
{
	struct rds_ib_connection *ic;

	spin_lock(&ib_nodev_conns_lock);
	list_for_each_entry(ic, &ib_nodev_conns, ib_node)
		rds_conn_connect_if_down(ic->conn);
	spin_unlock(&ib_nodev_conns_lock);
}

static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_connection *ic;
	unsigned long flags;

	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
		rds_conn_drop(ic->conn);
	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}

/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	if (rds_ibdev->mr_8k_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
	if (rds_ibdev->mr_1m_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);

	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev->vector_load);

	kfree(rds_ibdev);
}

void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
	BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
	if (refcount_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_wq, &rds_ibdev->free_work);
}

static void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;
	bool has_fr, has_fmr;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		return;

	spin_lock_init(&rds_ibdev->spinlock);
	refcount_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	rds_ibdev->max_wrs = device->attrs.max_qp_wr;
	rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);

	has_fr = (device->attrs.device_cap_flags &
		  IB_DEVICE_MEM_MGT_EXTENSIONS);
	has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
		   device->map_phys_fmr && device->unmap_fmr);
	rds_ibdev->use_fastreg = (has_fr && !has_fmr);

	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr ?: 32;
	rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
		min_t(unsigned int, (device->attrs.max_mr / 2),
		      rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;

	rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
		min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
		      rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;

	rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;

	rds_ibdev->vector_load = kzalloc(sizeof(int) * device->num_comp_vectors,
					 GFP_KERNEL);
	if (!rds_ibdev->vector_load) {
		pr_err("RDS/IB: %s failed to allocate vector memory\n",
		       __func__);
		goto put_dev;
	}

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(rds_ibdev->pd)) {
		rds_ibdev->pd = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_1m_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
		rds_ibdev->mr_1m_pool = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_8k_pool =
		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
		rds_ibdev->mr_8k_pool = NULL;
		goto put_dev;
	}

	rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
		 device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
		 rds_ibdev->max_8k_mrs);

	pr_info("RDS/IB: %s: %s supported and preferred\n",
		device->name,
		rds_ibdev->use_fastreg ? "FRMR" : "FMR");

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);

	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
	refcount_inc(&rds_ibdev->refcount);

	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	refcount_inc(&rds_ibdev->refcount);

	rds_ib_nodev_connect();

put_dev:
	rds_ib_dev_put(rds_ibdev);
}

/*
 * New connections use this to find the device to associate with the
 * connection.  It's not in the fast path so we're not concerned about the
 * performance of the IB call.  (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period.  The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (rds_ibdev)
		refcount_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}

/*
 * The IB stack is letting us know that a device is going away.  This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
	struct rds_ib_device *rds_ibdev = client_data;

	if (!rds_ibdev)
		return;

	rds_ib_dev_shutdown(rds_ibdev);

	/* stop connection attempts from getting a reference to this device. */
	ib_set_client_data(device, &rds_ib_client, NULL);

	down_write(&rds_ib_devices_lock);
	list_del_rcu(&rds_ibdev->list);
	up_write(&rds_ib_devices_lock);

	/*
	 * This synchronize_rcu() is waiting for readers of both the ib
	 * client data and the devices list to finish before we drop
	 * both of those references.
	 */
	synchronize_rcu();
	rds_ib_dev_put(rds_ibdev);
	rds_ib_dev_put(rds_ibdev);
}

struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};

static int rds_ib_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_ib_connection *ic;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;

	iinfo->src_addr = conn->c_laddr;
	iinfo->dst_addr = conn->c_faddr;

	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;

		ic = conn->c_transport_data;

		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
			       (union ib_gid *)&iinfo->dst_gid);

		rds_ibdev = ic->rds_ibdev;
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_ibdev->max_sge;
		rds_ib_get_mr_info(rds_ibdev, iinfo);
	}
	return 1;
}

static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds_info_rdma_connection) + 7) / 8];

	rds_for_each_conn_info(sock, len, iter, lens,
			       rds_ib_conn_info_visitor,
			       buffer,
			       sizeof(struct rds_info_rdma_connection));
}


/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible.  Sending and
 * receiving should be device-agnostic.  Transports would try and maintain
 * connections between peers who have messages queued.  Userspace would be
 * allowed to influence which paths have priority.  We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;

	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
			       NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* due to this, we will claim to support iWARP devices unless we
	   check node_type.
	 */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI4 ret %d node type %d\n",
		 &addr, ret,
		 cm_id->device ? cm_id->device->node_type : -1);

	rdma_destroy_id(cm_id);

	return ret;
}

static void rds_ib_unregister_client(void)
{
	ib_unregister_client(&rds_ib_client);
	/* wait for rds_ib_dev_free() to complete */
	flush_workqueue(rds_wq);
}

static void rds_ib_set_unloading(void)
{
	atomic_set(&rds_ib_unloading, 1);
}

static bool rds_ib_is_unloading(struct rds_connection *conn)
{
	struct rds_conn_path *cp = &conn->c_path[0];

	return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
		atomic_read(&rds_ib_unloading) != 0);
}

void rds_ib_exit(void)
{
	rds_ib_set_unloading();
	synchronize_rcu();
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
	rds_ib_unregister_client();
	rds_ib_destroy_nodev_conns();
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
	rds_ib_mr_exit();
}

struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	.xmit_path_complete	= rds_ib_xmit_path_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv_path		= rds_ib_recv_path,
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_path_connect	= rds_ib_conn_path_connect,
	.conn_path_shutdown	= rds_ib_conn_path_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_unloading		= rds_ib_is_unloading,
	.t_type			= RDS_TRANS_IB
};

int rds_ib_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_ib_devices);

	ret = rds_ib_mr_init();
	if (ret)
		goto out;

	ret = ib_register_client(&rds_ib_client);
	if (ret)
		goto out_mr_exit;

	ret = rds_ib_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_ib_recv_init();
	if (ret)
		goto out_sysctl;

	rds_trans_register(&rds_ib_transport);

	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

	goto out;

out_sysctl:
	rds_ib_sysctl_exit();
out_ibreg:
	rds_ib_unregister_client();
out_mr_exit:
	rds_ib_mr_exit();
out:
	return ret;
}

MODULE_LICENSE("GPL");