/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0,
	IB_UMAD_NUM_FIXED_MINOR = 64,
	IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
	IB_ISSM_MINOR_BASE        = IB_UMAD_NUM_FIXED_MINOR,
};

/*
 * Our lifetime rules for these structs are the following:
 *
 * When a device special file is opened, we take a reference on the
 * ib_umad_port's struct ib_umad_device.  We drop these references in
 * the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */

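/*
 * Rough sketch of how userspace is expected to drive this ABI.  This is
 * not part of the driver and is shown only for orientation; it assumes
 * the uapi definitions from <rdma/ib_user_mad.h> and a first port
 * exposed as /dev/infiniband/umad0:
 *
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *
 *	// opt in to the P_Key-index ABI before the first registration
 *	ioctl(fd, IB_USER_MAD_ENABLE_PKEY);
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn                = 1,	// GSI
 *		.mgmt_class         = ...,
 *		.mgmt_class_version = ...,
 *	};
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);
 *	// on return, req.id holds the agent slot used in hdr.id below
 *
 *	// send: a struct ib_user_mad header followed by the MAD itself
 *	umad->hdr.id  = req.id;
 *	umad->hdr.qpn = htobe32(1);
 *	umad->hdr.lid = htobe16(dlid);
 *	write(fd, umad, sizeof(struct ib_user_mad) + mad_len);
 *
 *	// receive: read() returns the header plus the MAD; hdr.status is
 *	// ETIMEDOUT when a solicited send got no response
 *	read(fd, umad, sizeof(struct ib_user_mad) + mad_len);
 */
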
struct ib_umad_port {
	struct cdev cdev;
	struct device dev;
	struct cdev sm_cdev;
	struct device sm_dev;
	struct semaphore sm_sem;

	struct mutex file_mutex;
	struct list_head file_list;

	struct ib_device *ib_dev;
	struct ib_umad_device *umad_dev;
	int dev_num;
	u8 port_num;
};

struct ib_umad_device {
	struct kref kref;
	struct ib_umad_port ports[];
};

struct ib_umad_file {
	struct mutex mutex;
	struct ib_umad_port *port;
	struct list_head recv_list;
	struct list_head send_list;
	struct list_head port_list;
	spinlock_t send_lock;
	wait_queue_head_t recv_wait;
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	int agents_dead;
	u8 use_pkey_index;
	u8 already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc *recv_wc;
	struct list_head list;
	int length;
	struct ib_user_mad mad;
};

#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>

static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
				   IB_UMAD_NUM_FIXED_MINOR;
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;

static DEFINE_IDA(umad_ida);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);

static void ib_umad_dev_free(struct kref *kref)
{
	struct ib_umad_device *dev =
		container_of(kref, struct ib_umad_device, kref);

	kfree(dev);
}

static void ib_umad_dev_get(struct ib_umad_device *dev)
{
	kref_get(&dev->kref);
}

static void ib_umad_dev_put(struct ib_umad_device *dev)
{
	kref_put(&dev->kref, ib_umad_dev_free);
}

static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

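/*
 * Queue a received or completed packet on the file's receive list so that
 * a later read() picks it up.  The packet's hdr.id is rewritten to the
 * slot of the matching agent; returns nonzero (and leaves the packet
 * unqueued) if the agent is no longer registered on this file.
 */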
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

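/*
 * Called by the MAD layer for every MAD received on a registered agent.
 * Wrap the receive work completion in an ib_umad_packet, fill in the
 * user-visible address information (source LID/SL/path bits, P_Key index
 * and, if present, the GRH fields) and queue it for read().  The
 * ib_mad_recv_wc itself is kept until the packet has been copied to
 * userspace and is only freed here on error.
 */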
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status = 0;
	packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
	/*
	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
	 * information is obtained elsewhere. Mask off the upper 16 bits.
	 */
	if (rdma_cap_opa_mad(agent->device, agent->port_num))
		packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
						  mad_recv_wc->wc->slid);
	else
		packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct rdma_ah_attr ah_attr;
		const struct ib_global_route *grh;
		int ret;

		ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
					      mad_recv_wc->wc,
					      mad_recv_wc->recv_buf.grh,
					      &ah_attr);
		if (ret)
			goto err2;

		grh = rdma_ah_read_grh(&ah_attr);
		packet->mad.hdr.gid_index = grh->sgid_index;
		packet->mad.hdr.hop_limit = grh->hop_limit;
		packet->mad.hdr.traffic_class = grh->traffic_class;
		memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
		rdma_destroy_ah_attr(&ah_attr);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}

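/*
 * Copy a received MAD to userspace: the ib_user_mad header first, then the
 * first (or only) MAD segment, then any remaining RMPP segments.  The user
 * buffer must be able to hold at least the header plus one segment.  If a
 * multi-segment message does not fit, only the first segment (which carries
 * the total RMPP length) is copied and -ENOSPC is returned, so the caller
 * requeues the packet and userspace can retry with a larger buffer.
 */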
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}

	trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);

	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	trace_ib_umad_read_send(file, &packet->mad.hdr,
				(struct ib_mad_hdr *)&packet->mad.data);

	return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}

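/*
 * Copy an RMPP send from the user buffer into the ib_mad_send_buf: first
 * any class-specific header beyond the common MAD/RMPP header, then the
 * payload, one RMPP segment at a time.
 */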
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad(hdr)) {
			if (!ib_response_mad(sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad(sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}

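/*
 * write() path: userspace hands us an ib_user_mad header followed by the
 * MAD to send.  Look up the agent from hdr.id, build an address handle
 * from the header fields, allocate and fill an ib_mad_send_buf, stamp the
 * high-order TID bits for requests, track the packet on send_list (used
 * for duplicate detection and timeout reporting) and post it.
 */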
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	trace_ib_umad_write(file, &packet->mad.hdr,
			    (struct ib_mad_hdr *)&packet->mad.data);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.type = rdma_ah_find_type(agent->device,
					 file->port->port_num);
	rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
	rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
	rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits);
	rdma_ah_set_port_num(&ah_attr, file->port->port_num);
	if (packet->mad.hdr.grh_present) {
		rdma_ah_set_grh(&ah_attr, NULL,
				be32_to_cpu(packet->mad.hdr.flow_label),
				packet->mad.hdr.gid_index,
				packet->mad.hdr.hop_limit,
				packet->mad.hdr.traffic_class);
		rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
	}

	ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && ib_mad_kernel_rmpp_agent(agent)) {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	if (!ib_mad_kernel_rmpp_agent(agent)
	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
	} else {
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
		if (ret) {
			ret = -EINVAL;
			goto err_msg;
		}
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

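/*
 * IB_USER_MAD_REGISTER_AGENT handler (compat_method_mask selects the
 * 32-bit layout of method_mask used by the compat ioctl).  Find a free
 * agent slot, register a MAD agent for the requested class/QP and return
 * the slot id through the user's ib_user_mad_reg_req.  The newer
 * IB_USER_MAD_REGISTER_AGENT2 handler below additionally accepts
 * registration flags and always selects the P_Key-index aware ABI.
 */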
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(&file->port->dev,
			   "ib_umad_reg_agent: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(&file->port->dev,
			   "ib_umad_reg_agent: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(&file->port->dev,
		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(&file->port->dev,
				"process %s did not enable P_Key index support.\n",
				current->comm);
			dev_warn(&file->port->dev,
				" Documentation/infiniband/user_mad.rst has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
	struct ib_user_mad_reg_req2 ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(&file->port->dev,
			   "ib_umad_reg_agent2: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(&file->port->dev,
			   "ib_umad_reg_agent2: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		dev_notice(&file->port->dev,
			   "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
			   ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;

		if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
			     (u32 __user *) (arg + offsetof(struct
			     ib_user_mad_reg_req2, flags))))
			ret = -EFAULT;

		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(&file->port->dev,
		   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		if (ureq.oui & 0xff000000) {
			dev_notice(&file->port->dev,
				   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
				   ureq.oui);
			ret = -EINVAL;
			goto out;
		}
		req.oui[2] = ureq.oui & 0x0000ff;
		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof(req.method_mask));
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *)(arg +
				offsetof(struct ib_user_mad_reg_req2, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		file->use_pkey_index = 1;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}


static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
		ret = -EPERM;
		goto out;
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	stream_open(inode, filp);
out:
	mutex_unlock(&port->file_mutex);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	return 0;
}

static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close,
	.llseek		= no_llseek,
};

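/*
 * The issm%d device: opening it claims the port's SM role.  The semaphore
 * allows only one opener at a time (O_NONBLOCK turns the wait into
 * -EAGAIN); the IB_PORT_SM capability bit is set while the file is open
 * and cleared again on release.
 */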
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
		ret = -EPERM;
		goto err_up_sem;
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret)
		goto err_up_sem;

	filp->private_data = port;

	nonseekable_open(inode, filp);
	return 0;

err_up_sem:
	up(&port->sm_sem);

fail:
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek	 = no_llseek,
};

static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
			       struct ib_client_nl_info *res)
{
	struct ib_umad_device *umad_dev = client_data;

	if (!rdma_is_port_valid(ibdev, res->port))
		return -EINVAL;

	res->abi = IB_USER_MAD_ABI_VERSION;
	res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;

	return 0;
}

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one,
	.get_nl_info = ib_umad_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("umad");

static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
			       struct ib_client_nl_info *res)
{
	struct ib_umad_device *umad_dev =
		ib_get_client_data(ibdev, &umad_client);

	if (!rdma_is_port_valid(ibdev, res->port))
		return -EINVAL;

	res->abi = IB_USER_MAD_ABI_VERSION;
	res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;

	return 0;
}

static struct ib_client issm_client = {
	.name = "issm",
	.get_nl_info = ib_issm_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("issm");

static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t port_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR_RO(port);

static struct attribute *umad_class_dev_attrs[] = {
	&dev_attr_ibdev.attr,
	&dev_attr_port.attr,
	NULL,
};
ATTRIBUTE_GROUPS(umad_class_dev);

static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static ssize_t abi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR_RO(abi_version);

static struct attribute *umad_class_attrs[] = {
	&class_attr_abi_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(umad_class);

static struct class umad_class = {
	.name		= "infiniband_mad",
	.devnode	= umad_devnode,
	.class_groups	= umad_class_groups,
	.dev_groups	= umad_class_dev_groups,
};

static void ib_umad_release_port(struct device *device)
{
	struct ib_umad_port *port = dev_get_drvdata(device);
	struct ib_umad_device *umad_dev = port->umad_dev;

	ib_umad_dev_put(umad_dev);
}

static void ib_umad_init_port_dev(struct device *dev,
				  struct ib_umad_port *port,
				  const struct ib_device *device)
{
	device_initialize(dev);
	ib_umad_dev_get(port->umad_dev);
	dev->class = &umad_class;
	dev->parent = device->dev.parent;
	dev_set_drvdata(dev, port);
	dev->release = ib_umad_release_port;
}

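/*
 * Create the umad%d and issm%d char devices for one port.  Minor numbers
 * come from umad_ida: the first IB_UMAD_NUM_FIXED_MINOR ports use the
 * fixed region under IB_UMAD_MAJOR, later ports fall back to the
 * dynamically allocated region set up in ib_umad_init().
 */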
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_device *umad_dev,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base_umad;
	dev_t base_issm;
	int ret;

	devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
	if (devnum < 0)
		return -1;
	port->dev_num = devnum;
	if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
		base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
		base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
	} else {
		base_umad = devnum + base_umad_dev;
		base_issm = devnum + base_issm_dev;
	}

	port->ib_dev   = device;
	port->umad_dev = umad_dev;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	ib_umad_init_port_dev(&port->dev, port, device);
	port->dev.devt = base_umad;
	dev_set_name(&port->dev, "umad%d", port->dev_num);
	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&port->cdev, &port->dev);
	if (ret)
		goto err_cdev;

	ib_umad_init_port_dev(&port->sm_dev, port, device);
	port->sm_dev.devt = base_issm;
	dev_set_name(&port->sm_dev, "issm%d", port->dev_num);
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&port->sm_cdev, &port->sm_dev);
	if (ret)
		goto err_dev;

	return 0;

err_dev:
	put_device(&port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);
err_cdev:
	put_device(&port->dev);
	ida_free(&umad_ida, devnum);
	return ret;
}

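/*
 * Tear down one port: unregister every agent of every open file and mark
 * their agents dead so no new MADs are sent or queued, then remove the
 * char devices.  Open files keep their references on the ib_umad_device,
 * so the umad_dev itself is only freed once the last file is closed.
 */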
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	mutex_lock(&port->file_mutex);

	/* Mark ib_dev NULL so that ioctls and other file ops cannot make
	 * any further progress against this port.
	 */
	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	cdev_device_del(&port->sm_cdev, &port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);
	ida_free(&umad_ida, port->dev_num);

	/* balances device_initialize() */
	put_device(&port->sm_dev);
	put_device(&port->dev);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->kref);
	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->ports[i - s]))
			goto err;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->ports[i - s]);
	}
free:
	/* balances kref_init */
	ib_umad_dev_put(umad_dev);
}

static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_umad_device *umad_dev = client_data;
	unsigned int i;

	if (!umad_dev)
		return;

	rdma_for_each_port (device, i) {
		if (rdma_cap_ib_mad(device, i))
			ib_umad_kill_port(
				&umad_dev->ports[i - rdma_start_port(device)]);
	}
	/* balances kref_init() */
	ib_umad_dev_put(umad_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_umad_dev,
				     IB_UMAD_NUM_FIXED_MINOR * 2,
				     umad_class.name);
	if (ret) {
		pr_err("couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
				  IB_UMAD_NUM_DYNAMIC_MINOR * 2,
				  umad_class.name);
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}
	dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;

	ret = class_register(&umad_class);
	if (ret) {
		pr_err("couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = ib_register_client(&umad_client);
	if (ret)
		goto out_class;

	ret = ib_register_client(&issm_client);
	if (ret)
		goto out_client;

	return 0;

out_client:
	ib_unregister_client(&umad_client);
out_class:
	class_unregister(&umad_class);

out_chrdev:
	unregister_chrdev_region(dynamic_umad_dev,
				 IB_UMAD_NUM_DYNAMIC_MINOR * 2);

out_alloc:
	unregister_chrdev_region(base_umad_dev,
				 IB_UMAD_NUM_FIXED_MINOR * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&issm_client);
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
	unregister_chrdev_region(base_umad_dev,
				 IB_UMAD_NUM_FIXED_MINOR * 2);
	unregister_chrdev_region(dynamic_umad_dev,
				 IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);