/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
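
/*
 * nldev: RDMA netlink (RDMA_NL_NLDEV) device and resource-tracking
 * interface. Userspace (e.g. iproute2's "rdma" tool, as in "rdma dev"
 * or "rdma resource show") drives the doit/dumpit handlers below to
 * query devices, ports and tracked resources.
 */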

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"

static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
					    .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]	= { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]	= { .type = NLA_NUL_STRING,
				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32]	= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]	= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]	= { .type = NLA_U64 },
};
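
/*
 * Driver-specific attributes are emitted as (name, optional print type,
 * value) triplets, so userspace can render vendor fields it does not
 * otherwise understand, e.g. in decimal or hexadecimal form.
 */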
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
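
/*
 * Every nldev reply starts with the device index and name pair; that
 * pair is the handle userspace passes back in follow-up requests.
 */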
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* A device without FW has strlen(fw) == 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      (u64)attr.port_cap_flags,
				      RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->get_netdev)
		netdev = device->get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
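
/*
 * Each fill_res_*_entry() helper below nests one
 * RDMA_NLDEV_ATTR_RES_*_ENTRY attribute. On overflow it cancels the
 * half-written nest and returns -EMSGSIZE, so the dump can resume in
 * the next message.
 */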
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, the user should read /proc/PID/comm to
	 * get the name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct rdma_restrack_root *resroot = &qp->device->res;
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
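
/*
 * CM_ID entries also carry the source and destination addresses as raw
 * __kernel_sockaddr_storage blobs; an address family of zero means the
 * address has not been set yet, so it is skipped.
 */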
static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct rdma_restrack_root *resroot = &cq->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
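
/*
 * Memory keys (and the PD's local DMA lkey and unsafe global rkey
 * below) are security sensitive, so they are exposed only to callers
 * with CAP_NET_ADMIN.
 */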
static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct rdma_restrack_root *resroot = &mr->pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct rdma_restrack_root *resroot = &pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
	}

	put_device(&device->dev);
	return err;
}
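
/*
 * Dump callbacks are restartable: cb->args[0] records how many entries
 * have already been emitted, and entries below that index are skipped
 * on the next invocation.
 */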
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here,
	 * because we rely on ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information starting from
		 * a specific index. This index is taken from the netlink
		 * request sent by the user and is available in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
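
/*
 * Per-resource-type dispatch: maps each restrack type to its fill
 * function, netlink command and nesting attribute, so one common
 * dumpit implementation can serve QP/CM_ID/CQ/MR/PD.
 */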
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};
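
/*
 * Walk the device's restrack hash under res.rwsem. Each entry is
 * pinned with rdma_restrack_get() before the lock is dropped for the
 * fill callback, so it cannot be freed underneath us.
 */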
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res
	 * information, but it is possible to extend this code to return
	 * all devices in one shot by checking the existence of
	 * RDMA_NLDEV_ATTR_DEV_INDEX; if it doesn't exist, iterate over
	 * all devices.
	 *
	 * But that is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, return all resources of this
	 * type from the device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources are visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * The resource is under release now, but we are not
			 * releasing the lock, so it will go away only in
			 * our next pass, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Return the resource; it won't actually be released
		 * until &device->res.rwsem is released for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:
		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and return 0
	 * to mark the end of the dump.
	 */
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	put_device(&device->dev);
	return ret;
}

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}

static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. An identifier must be provided; while that is easy
		 *    for QPs (device index + port index + LQPN), it is not
		 *    the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface
		 *    for all resources, let's wait until the other
		 *    resources are implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);