1 /* 2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Neither the names of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU General Public License ("GPL") version 2 as published by the Free 18 * Software Foundation. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33 #include <linux/module.h> 34 #include <linux/pid.h> 35 #include <linux/pid_namespace.h> 36 #include <linux/mutex.h> 37 #include <net/netlink.h> 38 #include <rdma/rdma_cm.h> 39 #include <rdma/rdma_netlink.h> 40 41 #include "core_priv.h" 42 #include "cma_priv.h" 43 #include "restrack.h" 44 #include "uverbs.h" 45 46 typedef int (*res_fill_func_t)(struct sk_buff*, bool, 47 struct rdma_restrack_entry*, uint32_t); 48 49 /* 50 * Sort array elements by the netlink attribute name 51 */ 52 static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { 53 [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 }, 54 [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 }, 55 [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING, 56 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 57 [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING, 58 .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE }, 59 [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 }, 60 [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, 61 [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, 62 .len = IB_DEVICE_NAME_MAX }, 63 [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, 64 [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, 65 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 66 [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, 67 [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, 68 [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, 69 [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, 70 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 71 [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, 72 [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, 73 [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, 74 [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, 75 [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, 76 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 77 [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, 78 [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, 79 .len = IFNAMSIZ }, 80 [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, 81 [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, 82 [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, 83 .len = IFNAMSIZ }, 84 [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, 85 [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, 86 [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, 87 [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, 88 [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, 89 [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, 90 [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, 91 [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, 92 [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, 93 [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, 94 [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, 95 [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, 96 [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { 97 .len = sizeof(struct __kernel_sockaddr_storage) }, 98 [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, 99 [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, 100 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 101 [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, 102 [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, 103 [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, 104 [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, 105 [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, 106 [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, 107 [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, 108 [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, 109 
[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, 110 [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, 111 [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, 112 [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, 113 [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, 114 [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, 115 [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, 116 [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, 117 [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, 118 [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, 119 [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, 120 [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, 121 [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, 122 [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { 123 .len = sizeof(struct __kernel_sockaddr_storage) }, 124 [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, 125 [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, 126 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, 127 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 }, 128 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, 129 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 130 [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, 131 [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, 132 [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, 133 [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, 134 [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, 135 [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, 136 [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 }, 137 [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 }, 138 [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED }, 139 [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED }, 140 [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 }, 141 [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED }, 142 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED }, 143 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING }, 144 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 }, 145 [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, 146 [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, 147 [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, 148 [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, 149 }; 150 151 static int put_driver_name_print_type(struct sk_buff *msg, const char *name, 152 enum rdma_nldev_print_type print_type) 153 { 154 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) 155 return -EMSGSIZE; 156 if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && 157 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) 158 return -EMSGSIZE; 159 160 return 0; 161 } 162 163 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, 164 enum rdma_nldev_print_type print_type, 165 u32 value) 166 { 167 if (put_driver_name_print_type(msg, name, print_type)) 168 return -EMSGSIZE; 169 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) 170 return -EMSGSIZE; 171 172 return 0; 173 } 174 175 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, 176 enum rdma_nldev_print_type print_type, 177 u64 value) 178 { 179 if (put_driver_name_print_type(msg, name, print_type)) 180 return -EMSGSIZE; 181 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, 182 RDMA_NLDEV_ATTR_PAD)) 183 return -EMSGSIZE; 184 185 return 0; 186 } 187 188 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, 189 const char *str) 190 { 191 if 
(put_driver_name_print_type(msg, name, 192 RDMA_NLDEV_PRINT_TYPE_UNSPEC)) 193 return -EMSGSIZE; 194 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) 195 return -EMSGSIZE; 196 197 return 0; 198 } 199 EXPORT_SYMBOL(rdma_nl_put_driver_string); 200 201 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) 202 { 203 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 204 value); 205 } 206 EXPORT_SYMBOL(rdma_nl_put_driver_u32); 207 208 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, 209 u32 value) 210 { 211 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 212 value); 213 } 214 EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); 215 216 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) 217 { 218 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 219 value); 220 } 221 EXPORT_SYMBOL(rdma_nl_put_driver_u64); 222 223 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) 224 { 225 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 226 value); 227 } 228 EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); 229 230 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) 231 { 232 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) 233 return -EMSGSIZE; 234 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, 235 dev_name(&device->dev))) 236 return -EMSGSIZE; 237 238 return 0; 239 } 240 241 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) 242 { 243 char fw[IB_FW_VERSION_NAME_MAX]; 244 int ret = 0; 245 u8 port; 246 247 if (fill_nldev_handle(msg, device)) 248 return -EMSGSIZE; 249 250 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) 251 return -EMSGSIZE; 252 253 BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); 254 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 255 device->attrs.device_cap_flags, 256 RDMA_NLDEV_ATTR_PAD)) 257 return -EMSGSIZE; 258 259 ib_get_device_fw_str(device, fw); 260 /* Device without FW has strlen(fw) = 0 */ 261 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) 262 return -EMSGSIZE; 263 264 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, 265 be64_to_cpu(device->node_guid), 266 RDMA_NLDEV_ATTR_PAD)) 267 return -EMSGSIZE; 268 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, 269 be64_to_cpu(device->attrs.sys_image_guid), 270 RDMA_NLDEV_ATTR_PAD)) 271 return -EMSGSIZE; 272 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) 273 return -EMSGSIZE; 274 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) 275 return -EMSGSIZE; 276 277 /* 278 * Link type is determined on first port and mlx4 device 279 * which can potentially have two different link type for the same 280 * IB device is considered as better to be avoided in the future, 281 */ 282 port = rdma_start_port(device); 283 if (rdma_cap_opa_mad(device, port)) 284 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); 285 else if (rdma_protocol_ib(device, port)) 286 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); 287 else if (rdma_protocol_iwarp(device, port)) 288 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); 289 else if (rdma_protocol_roce(device, port)) 290 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); 291 else if (rdma_protocol_usnic(device, port)) 292 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, 293 "usnic"); 294 return ret; 295 
} 296 297 static int fill_port_info(struct sk_buff *msg, 298 struct ib_device *device, u32 port, 299 const struct net *net) 300 { 301 struct net_device *netdev = NULL; 302 struct ib_port_attr attr; 303 int ret; 304 u64 cap_flags = 0; 305 306 if (fill_nldev_handle(msg, device)) 307 return -EMSGSIZE; 308 309 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) 310 return -EMSGSIZE; 311 312 ret = ib_query_port(device, port, &attr); 313 if (ret) 314 return ret; 315 316 if (rdma_protocol_ib(device, port)) { 317 BUILD_BUG_ON((sizeof(attr.port_cap_flags) + 318 sizeof(attr.port_cap_flags2)) > sizeof(u64)); 319 cap_flags = attr.port_cap_flags | 320 ((u64)attr.port_cap_flags2 << 32); 321 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 322 cap_flags, RDMA_NLDEV_ATTR_PAD)) 323 return -EMSGSIZE; 324 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, 325 attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) 326 return -EMSGSIZE; 327 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) 328 return -EMSGSIZE; 329 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) 330 return -EMSGSIZE; 331 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) 332 return -EMSGSIZE; 333 } 334 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) 335 return -EMSGSIZE; 336 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) 337 return -EMSGSIZE; 338 339 netdev = ib_device_get_netdev(device, port); 340 if (netdev && net_eq(dev_net(netdev), net)) { 341 ret = nla_put_u32(msg, 342 RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); 343 if (ret) 344 goto out; 345 ret = nla_put_string(msg, 346 RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); 347 } 348 349 out: 350 if (netdev) 351 dev_put(netdev); 352 return ret; 353 } 354 355 static int fill_res_info_entry(struct sk_buff *msg, 356 const char *name, u64 curr) 357 { 358 struct nlattr *entry_attr; 359 360 entry_attr = nla_nest_start_noflag(msg, 361 RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); 362 if (!entry_attr) 363 return -EMSGSIZE; 364 365 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) 366 goto err; 367 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 368 RDMA_NLDEV_ATTR_PAD)) 369 goto err; 370 371 nla_nest_end(msg, entry_attr); 372 return 0; 373 374 err: 375 nla_nest_cancel(msg, entry_attr); 376 return -EMSGSIZE; 377 } 378 379 static int fill_res_info(struct sk_buff *msg, struct ib_device *device) 380 { 381 static const char * const names[RDMA_RESTRACK_MAX] = { 382 [RDMA_RESTRACK_PD] = "pd", 383 [RDMA_RESTRACK_CQ] = "cq", 384 [RDMA_RESTRACK_QP] = "qp", 385 [RDMA_RESTRACK_CM_ID] = "cm_id", 386 [RDMA_RESTRACK_MR] = "mr", 387 [RDMA_RESTRACK_CTX] = "ctx", 388 }; 389 390 struct nlattr *table_attr; 391 int ret, i, curr; 392 393 if (fill_nldev_handle(msg, device)) 394 return -EMSGSIZE; 395 396 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); 397 if (!table_attr) 398 return -EMSGSIZE; 399 400 for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 401 if (!names[i]) 402 continue; 403 curr = rdma_restrack_count(device, i); 404 ret = fill_res_info_entry(msg, names[i], curr); 405 if (ret) 406 goto err; 407 } 408 409 nla_nest_end(msg, table_attr); 410 return 0; 411 412 err: 413 nla_nest_cancel(msg, table_attr); 414 return ret; 415 } 416 417 static int fill_res_name_pid(struct sk_buff *msg, 418 struct rdma_restrack_entry *res) 419 { 420 int err = 0; 421 422 /* 423 * For user resources, user is should read /proc/PID/comm to get the 424 * name of the task file. 
425 */ 426 if (rdma_is_kernel_res(res)) { 427 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, 428 res->kern_name); 429 } else { 430 pid_t pid; 431 432 pid = task_pid_vnr(res->task); 433 /* 434 * Task is dead and in zombie state. 435 * There is no need to print PID anymore. 436 */ 437 if (pid) 438 /* 439 * This part is racy, task can be killed and PID will 440 * be zero right here but it is ok, next query won't 441 * return PID. We don't promise real-time reflection 442 * of SW objects. 443 */ 444 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); 445 } 446 447 return err ? -EMSGSIZE : 0; 448 } 449 450 static int fill_res_qp_entry_query(struct sk_buff *msg, 451 struct rdma_restrack_entry *res, 452 struct ib_device *dev, 453 struct ib_qp *qp) 454 { 455 struct ib_qp_init_attr qp_init_attr; 456 struct ib_qp_attr qp_attr; 457 int ret; 458 459 ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); 460 if (ret) 461 return ret; 462 463 if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { 464 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, 465 qp_attr.dest_qp_num)) 466 goto err; 467 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, 468 qp_attr.rq_psn)) 469 goto err; 470 } 471 472 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) 473 goto err; 474 475 if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || 476 qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { 477 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, 478 qp_attr.path_mig_state)) 479 goto err; 480 } 481 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) 482 goto err; 483 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) 484 goto err; 485 486 if (dev->ops.fill_res_qp_entry) 487 return dev->ops.fill_res_qp_entry(msg, qp); 488 return 0; 489 490 err: return -EMSGSIZE; 491 } 492 493 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, 494 struct rdma_restrack_entry *res, uint32_t port) 495 { 496 struct ib_qp *qp = container_of(res, struct ib_qp, res); 497 struct ib_device *dev = qp->device; 498 int ret; 499 500 if (port && port != qp->port) 501 return -EAGAIN; 502 503 /* In create_qp() port is not set yet */ 504 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) 505 return -EINVAL; 506 507 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); 508 if (ret) 509 return -EMSGSIZE; 510 511 if (!rdma_is_kernel_res(res) && 512 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) 513 return -EMSGSIZE; 514 515 ret = fill_res_name_pid(msg, res); 516 if (ret) 517 return -EMSGSIZE; 518 519 return fill_res_qp_entry_query(msg, res, dev, qp); 520 } 521 522 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 523 struct rdma_restrack_entry *res, uint32_t port) 524 { 525 struct ib_qp *qp = container_of(res, struct ib_qp, res); 526 struct ib_device *dev = qp->device; 527 528 if (port && port != qp->port) 529 return -EAGAIN; 530 if (!dev->ops.fill_res_qp_entry_raw) 531 return -EINVAL; 532 return dev->ops.fill_res_qp_entry_raw(msg, qp); 533 } 534 535 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, 536 struct rdma_restrack_entry *res, uint32_t port) 537 { 538 struct rdma_id_private *id_priv = 539 container_of(res, struct rdma_id_private, res); 540 struct ib_device *dev = id_priv->id.device; 541 struct rdma_cm_id *cm_id = &id_priv->id; 542 543 if (port && port != cm_id->port_num) 544 return 0; 545 546 if (cm_id->port_num && 547 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, 
cm_id->port_num)) 548 goto err; 549 550 if (id_priv->qp_num) { 551 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) 552 goto err; 553 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) 554 goto err; 555 } 556 557 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) 558 goto err; 559 560 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) 561 goto err; 562 563 if (cm_id->route.addr.src_addr.ss_family && 564 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, 565 sizeof(cm_id->route.addr.src_addr), 566 &cm_id->route.addr.src_addr)) 567 goto err; 568 if (cm_id->route.addr.dst_addr.ss_family && 569 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, 570 sizeof(cm_id->route.addr.dst_addr), 571 &cm_id->route.addr.dst_addr)) 572 goto err; 573 574 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) 575 goto err; 576 577 if (fill_res_name_pid(msg, res)) 578 goto err; 579 580 if (dev->ops.fill_res_cm_id_entry) 581 return dev->ops.fill_res_cm_id_entry(msg, cm_id); 582 return 0; 583 584 err: return -EMSGSIZE; 585 } 586 587 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, 588 struct rdma_restrack_entry *res, uint32_t port) 589 { 590 struct ib_cq *cq = container_of(res, struct ib_cq, res); 591 struct ib_device *dev = cq->device; 592 593 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) 594 return -EMSGSIZE; 595 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 596 atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) 597 return -EMSGSIZE; 598 599 /* Poll context is only valid for kernel CQs */ 600 if (rdma_is_kernel_res(res) && 601 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) 602 return -EMSGSIZE; 603 604 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) 605 return -EMSGSIZE; 606 607 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) 608 return -EMSGSIZE; 609 if (!rdma_is_kernel_res(res) && 610 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 611 cq->uobject->uevent.uobject.context->res.id)) 612 return -EMSGSIZE; 613 614 if (fill_res_name_pid(msg, res)) 615 return -EMSGSIZE; 616 617 return (dev->ops.fill_res_cq_entry) ? 618 dev->ops.fill_res_cq_entry(msg, cq) : 0; 619 } 620 621 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 622 struct rdma_restrack_entry *res, uint32_t port) 623 { 624 struct ib_cq *cq = container_of(res, struct ib_cq, res); 625 struct ib_device *dev = cq->device; 626 627 if (!dev->ops.fill_res_cq_entry_raw) 628 return -EINVAL; 629 return dev->ops.fill_res_cq_entry_raw(msg, cq); 630 } 631 632 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 633 struct rdma_restrack_entry *res, uint32_t port) 634 { 635 struct ib_mr *mr = container_of(res, struct ib_mr, res); 636 struct ib_device *dev = mr->pd->device; 637 638 if (has_cap_net_admin) { 639 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) 640 return -EMSGSIZE; 641 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) 642 return -EMSGSIZE; 643 } 644 645 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 646 RDMA_NLDEV_ATTR_PAD)) 647 return -EMSGSIZE; 648 649 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 650 return -EMSGSIZE; 651 652 if (!rdma_is_kernel_res(res) && 653 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) 654 return -EMSGSIZE; 655 656 if (fill_res_name_pid(msg, res)) 657 return -EMSGSIZE; 658 659 return (dev->ops.fill_res_mr_entry) ? 
660 dev->ops.fill_res_mr_entry(msg, mr) : 661 0; 662 } 663 664 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 665 struct rdma_restrack_entry *res, uint32_t port) 666 { 667 struct ib_mr *mr = container_of(res, struct ib_mr, res); 668 struct ib_device *dev = mr->pd->device; 669 670 if (!dev->ops.fill_res_mr_entry_raw) 671 return -EINVAL; 672 return dev->ops.fill_res_mr_entry_raw(msg, mr); 673 } 674 675 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, 676 struct rdma_restrack_entry *res, uint32_t port) 677 { 678 struct ib_pd *pd = container_of(res, struct ib_pd, res); 679 680 if (has_cap_net_admin) { 681 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, 682 pd->local_dma_lkey)) 683 goto err; 684 if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 685 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 686 pd->unsafe_global_rkey)) 687 goto err; 688 } 689 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 690 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 691 goto err; 692 693 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) 694 goto err; 695 696 if (!rdma_is_kernel_res(res) && 697 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 698 pd->uobject->context->res.id)) 699 goto err; 700 701 return fill_res_name_pid(msg, res); 702 703 err: return -EMSGSIZE; 704 } 705 706 static int fill_stat_counter_mode(struct sk_buff *msg, 707 struct rdma_counter *counter) 708 { 709 struct rdma_counter_mode *m = &counter->mode; 710 711 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) 712 return -EMSGSIZE; 713 714 if (m->mode == RDMA_COUNTER_MODE_AUTO) { 715 if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && 716 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) 717 return -EMSGSIZE; 718 719 if ((m->mask & RDMA_COUNTER_MASK_PID) && 720 fill_res_name_pid(msg, &counter->res)) 721 return -EMSGSIZE; 722 } 723 724 return 0; 725 } 726 727 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) 728 { 729 struct nlattr *entry_attr; 730 731 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); 732 if (!entry_attr) 733 return -EMSGSIZE; 734 735 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) 736 goto err; 737 738 nla_nest_end(msg, entry_attr); 739 return 0; 740 741 err: 742 nla_nest_cancel(msg, entry_attr); 743 return -EMSGSIZE; 744 } 745 746 static int fill_stat_counter_qps(struct sk_buff *msg, 747 struct rdma_counter *counter) 748 { 749 struct rdma_restrack_entry *res; 750 struct rdma_restrack_root *rt; 751 struct nlattr *table_attr; 752 struct ib_qp *qp = NULL; 753 unsigned long id = 0; 754 int ret = 0; 755 756 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); 757 758 rt = &counter->device->res[RDMA_RESTRACK_QP]; 759 xa_lock(&rt->xa); 760 xa_for_each(&rt->xa, id, res) { 761 qp = container_of(res, struct ib_qp, res); 762 if (!qp->counter || (qp->counter->id != counter->id)) 763 continue; 764 765 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); 766 if (ret) 767 goto err; 768 } 769 770 xa_unlock(&rt->xa); 771 nla_nest_end(msg, table_attr); 772 return 0; 773 774 err: 775 xa_unlock(&rt->xa); 776 nla_nest_cancel(msg, table_attr); 777 return ret; 778 } 779 780 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, 781 u64 value) 782 { 783 struct nlattr *entry_attr; 784 785 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); 786 if (!entry_attr) 787 return -EMSGSIZE; 788 789 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, 790 name)) 791 goto err; 792 if 
(nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, 793 value, RDMA_NLDEV_ATTR_PAD)) 794 goto err; 795 796 nla_nest_end(msg, entry_attr); 797 return 0; 798 799 err: 800 nla_nest_cancel(msg, entry_attr); 801 return -EMSGSIZE; 802 } 803 EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); 804 805 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 806 struct rdma_restrack_entry *res, uint32_t port) 807 { 808 struct ib_mr *mr = container_of(res, struct ib_mr, res); 809 struct ib_device *dev = mr->pd->device; 810 811 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 812 goto err; 813 814 if (dev->ops.fill_stat_mr_entry) 815 return dev->ops.fill_stat_mr_entry(msg, mr); 816 return 0; 817 818 err: 819 return -EMSGSIZE; 820 } 821 822 static int fill_stat_counter_hwcounters(struct sk_buff *msg, 823 struct rdma_counter *counter) 824 { 825 struct rdma_hw_stats *st = counter->stats; 826 struct nlattr *table_attr; 827 int i; 828 829 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 830 if (!table_attr) 831 return -EMSGSIZE; 832 833 for (i = 0; i < st->num_counters; i++) 834 if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i])) 835 goto err; 836 837 nla_nest_end(msg, table_attr); 838 return 0; 839 840 err: 841 nla_nest_cancel(msg, table_attr); 842 return -EMSGSIZE; 843 } 844 845 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, 846 struct rdma_restrack_entry *res, 847 uint32_t port) 848 { 849 struct rdma_counter *counter = 850 container_of(res, struct rdma_counter, res); 851 852 if (port && port != counter->port) 853 return -EAGAIN; 854 855 /* Dump it even query failed */ 856 rdma_counter_query_stats(counter); 857 858 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || 859 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || 860 fill_stat_counter_mode(msg, counter) || 861 fill_stat_counter_qps(msg, counter) || 862 fill_stat_counter_hwcounters(msg, counter)) 863 return -EMSGSIZE; 864 865 return 0; 866 } 867 868 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 869 struct netlink_ext_ack *extack) 870 { 871 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 872 struct ib_device *device; 873 struct sk_buff *msg; 874 u32 index; 875 int err; 876 877 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 878 nldev_policy, extack); 879 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 880 return -EINVAL; 881 882 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 883 884 device = ib_device_get_by_index(sock_net(skb->sk), index); 885 if (!device) 886 return -EINVAL; 887 888 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 889 if (!msg) { 890 err = -ENOMEM; 891 goto err; 892 } 893 894 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 895 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 896 0, 0); 897 898 err = fill_dev_info(msg, device); 899 if (err) 900 goto err_free; 901 902 nlmsg_end(msg, nlh); 903 904 ib_device_put(device); 905 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 906 907 err_free: 908 nlmsg_free(msg); 909 err: 910 ib_device_put(device); 911 return err; 912 } 913 914 static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 915 struct netlink_ext_ack *extack) 916 { 917 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 918 struct ib_device *device; 919 u32 index; 920 int err; 921 922 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 923 nldev_policy, extack); 924 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 925 
return -EINVAL; 926 927 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 928 device = ib_device_get_by_index(sock_net(skb->sk), index); 929 if (!device) 930 return -EINVAL; 931 932 if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { 933 char name[IB_DEVICE_NAME_MAX] = {}; 934 935 nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 936 IB_DEVICE_NAME_MAX); 937 if (strlen(name) == 0) { 938 err = -EINVAL; 939 goto done; 940 } 941 err = ib_device_rename(device, name); 942 goto done; 943 } 944 945 if (tb[RDMA_NLDEV_NET_NS_FD]) { 946 u32 ns_fd; 947 948 ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); 949 err = ib_device_set_netns_put(skb, device, ns_fd); 950 goto put_done; 951 } 952 953 if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { 954 u8 use_dim; 955 956 use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); 957 err = ib_device_set_dim(device, use_dim); 958 goto done; 959 } 960 961 done: 962 ib_device_put(device); 963 put_done: 964 return err; 965 } 966 967 static int _nldev_get_dumpit(struct ib_device *device, 968 struct sk_buff *skb, 969 struct netlink_callback *cb, 970 unsigned int idx) 971 { 972 int start = cb->args[0]; 973 struct nlmsghdr *nlh; 974 975 if (idx < start) 976 return 0; 977 978 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 979 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 980 0, NLM_F_MULTI); 981 982 if (fill_dev_info(skb, device)) { 983 nlmsg_cancel(skb, nlh); 984 goto out; 985 } 986 987 nlmsg_end(skb, nlh); 988 989 idx++; 990 991 out: cb->args[0] = idx; 992 return skb->len; 993 } 994 995 static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 996 { 997 /* 998 * There is no need to take lock, because 999 * we are relying on ib_core's locking. 1000 */ 1001 return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); 1002 } 1003 1004 static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1005 struct netlink_ext_ack *extack) 1006 { 1007 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1008 struct ib_device *device; 1009 struct sk_buff *msg; 1010 u32 index; 1011 u32 port; 1012 int err; 1013 1014 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1015 nldev_policy, extack); 1016 if (err || 1017 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 1018 !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 1019 return -EINVAL; 1020 1021 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1022 device = ib_device_get_by_index(sock_net(skb->sk), index); 1023 if (!device) 1024 return -EINVAL; 1025 1026 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1027 if (!rdma_is_port_valid(device, port)) { 1028 err = -EINVAL; 1029 goto err; 1030 } 1031 1032 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1033 if (!msg) { 1034 err = -ENOMEM; 1035 goto err; 1036 } 1037 1038 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1039 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1040 0, 0); 1041 1042 err = fill_port_info(msg, device, port, sock_net(skb->sk)); 1043 if (err) 1044 goto err_free; 1045 1046 nlmsg_end(msg, nlh); 1047 ib_device_put(device); 1048 1049 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1050 1051 err_free: 1052 nlmsg_free(msg); 1053 err: 1054 ib_device_put(device); 1055 return err; 1056 } 1057 1058 static int nldev_port_get_dumpit(struct sk_buff *skb, 1059 struct netlink_callback *cb) 1060 { 1061 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1062 struct ib_device *device; 1063 int start = cb->args[0]; 1064 struct nlmsghdr *nlh; 1065 u32 idx = 0; 1066 u32 ifindex; 1067 int err; 1068 unsigned int p; 1069 1070 err = nlmsg_parse_deprecated(cb->nlh, 0, 
tb, RDMA_NLDEV_ATTR_MAX - 1, 1071 nldev_policy, NULL); 1072 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1073 return -EINVAL; 1074 1075 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1076 device = ib_device_get_by_index(sock_net(skb->sk), ifindex); 1077 if (!device) 1078 return -EINVAL; 1079 1080 rdma_for_each_port (device, p) { 1081 /* 1082 * The dumpit function returns all information from specific 1083 * index. This specific index is taken from the netlink 1084 * messages request sent by user and it is available 1085 * in cb->args[0]. 1086 * 1087 * Usually, the user doesn't fill this field and it causes 1088 * to return everything. 1089 * 1090 */ 1091 if (idx < start) { 1092 idx++; 1093 continue; 1094 } 1095 1096 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, 1097 cb->nlh->nlmsg_seq, 1098 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1099 RDMA_NLDEV_CMD_PORT_GET), 1100 0, NLM_F_MULTI); 1101 1102 if (fill_port_info(skb, device, p, sock_net(skb->sk))) { 1103 nlmsg_cancel(skb, nlh); 1104 goto out; 1105 } 1106 idx++; 1107 nlmsg_end(skb, nlh); 1108 } 1109 1110 out: 1111 ib_device_put(device); 1112 cb->args[0] = idx; 1113 return skb->len; 1114 } 1115 1116 static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1117 struct netlink_ext_ack *extack) 1118 { 1119 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1120 struct ib_device *device; 1121 struct sk_buff *msg; 1122 u32 index; 1123 int ret; 1124 1125 ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1126 nldev_policy, extack); 1127 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1128 return -EINVAL; 1129 1130 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1131 device = ib_device_get_by_index(sock_net(skb->sk), index); 1132 if (!device) 1133 return -EINVAL; 1134 1135 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1136 if (!msg) { 1137 ret = -ENOMEM; 1138 goto err; 1139 } 1140 1141 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1142 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1143 0, 0); 1144 1145 ret = fill_res_info(msg, device); 1146 if (ret) 1147 goto err_free; 1148 1149 nlmsg_end(msg, nlh); 1150 ib_device_put(device); 1151 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1152 1153 err_free: 1154 nlmsg_free(msg); 1155 err: 1156 ib_device_put(device); 1157 return ret; 1158 } 1159 1160 static int _nldev_res_get_dumpit(struct ib_device *device, 1161 struct sk_buff *skb, 1162 struct netlink_callback *cb, 1163 unsigned int idx) 1164 { 1165 int start = cb->args[0]; 1166 struct nlmsghdr *nlh; 1167 1168 if (idx < start) 1169 return 0; 1170 1171 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1172 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1173 0, NLM_F_MULTI); 1174 1175 if (fill_res_info(skb, device)) { 1176 nlmsg_cancel(skb, nlh); 1177 goto out; 1178 } 1179 nlmsg_end(skb, nlh); 1180 1181 idx++; 1182 1183 out: 1184 cb->args[0] = idx; 1185 return skb->len; 1186 } 1187 1188 static int nldev_res_get_dumpit(struct sk_buff *skb, 1189 struct netlink_callback *cb) 1190 { 1191 return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); 1192 } 1193 1194 struct nldev_fill_res_entry { 1195 enum rdma_nldev_attr nldev_attr; 1196 u8 flags; 1197 u32 entry; 1198 u32 id; 1199 }; 1200 1201 enum nldev_res_flags { 1202 NLDEV_PER_DEV = 1 << 0, 1203 }; 1204 1205 static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { 1206 [RDMA_RESTRACK_QP] = { 1207 .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, 1208 .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, 1209 .id = 
RDMA_NLDEV_ATTR_RES_LQPN, 1210 }, 1211 [RDMA_RESTRACK_CM_ID] = { 1212 .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, 1213 .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, 1214 .id = RDMA_NLDEV_ATTR_RES_CM_IDN, 1215 }, 1216 [RDMA_RESTRACK_CQ] = { 1217 .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, 1218 .flags = NLDEV_PER_DEV, 1219 .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, 1220 .id = RDMA_NLDEV_ATTR_RES_CQN, 1221 }, 1222 [RDMA_RESTRACK_MR] = { 1223 .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, 1224 .flags = NLDEV_PER_DEV, 1225 .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, 1226 .id = RDMA_NLDEV_ATTR_RES_MRN, 1227 }, 1228 [RDMA_RESTRACK_PD] = { 1229 .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, 1230 .flags = NLDEV_PER_DEV, 1231 .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, 1232 .id = RDMA_NLDEV_ATTR_RES_PDN, 1233 }, 1234 [RDMA_RESTRACK_COUNTER] = { 1235 .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, 1236 .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, 1237 .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, 1238 }, 1239 }; 1240 1241 static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1242 struct netlink_ext_ack *extack, 1243 enum rdma_restrack_type res_type, 1244 res_fill_func_t fill_func) 1245 { 1246 const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1247 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1248 struct rdma_restrack_entry *res; 1249 struct ib_device *device; 1250 u32 index, id, port = 0; 1251 bool has_cap_net_admin; 1252 struct sk_buff *msg; 1253 int ret; 1254 1255 ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1256 nldev_policy, extack); 1257 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) 1258 return -EINVAL; 1259 1260 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1261 device = ib_device_get_by_index(sock_net(skb->sk), index); 1262 if (!device) 1263 return -EINVAL; 1264 1265 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1266 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1267 if (!rdma_is_port_valid(device, port)) { 1268 ret = -EINVAL; 1269 goto err; 1270 } 1271 } 1272 1273 if ((port && fe->flags & NLDEV_PER_DEV) || 1274 (!port && ~fe->flags & NLDEV_PER_DEV)) { 1275 ret = -EINVAL; 1276 goto err; 1277 } 1278 1279 id = nla_get_u32(tb[fe->id]); 1280 res = rdma_restrack_get_byid(device, res_type, id); 1281 if (IS_ERR(res)) { 1282 ret = PTR_ERR(res); 1283 goto err; 1284 } 1285 1286 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1287 if (!msg) { 1288 ret = -ENOMEM; 1289 goto err_get; 1290 } 1291 1292 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1293 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1294 RDMA_NL_GET_OP(nlh->nlmsg_type)), 1295 0, 0); 1296 1297 if (fill_nldev_handle(msg, device)) { 1298 ret = -EMSGSIZE; 1299 goto err_free; 1300 } 1301 1302 has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); 1303 1304 ret = fill_func(msg, has_cap_net_admin, res, port); 1305 if (ret) 1306 goto err_free; 1307 1308 rdma_restrack_put(res); 1309 nlmsg_end(msg, nlh); 1310 ib_device_put(device); 1311 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1312 1313 err_free: 1314 nlmsg_free(msg); 1315 err_get: 1316 rdma_restrack_put(res); 1317 err: 1318 ib_device_put(device); 1319 return ret; 1320 } 1321 1322 static int res_get_common_dumpit(struct sk_buff *skb, 1323 struct netlink_callback *cb, 1324 enum rdma_restrack_type res_type, 1325 res_fill_func_t fill_func) 1326 { 1327 const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1328 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1329 struct rdma_restrack_entry *res; 1330 struct rdma_restrack_root *rt; 1331 int err, ret = 
0, idx = 0; 1332 struct nlattr *table_attr; 1333 struct nlattr *entry_attr; 1334 struct ib_device *device; 1335 int start = cb->args[0]; 1336 bool has_cap_net_admin; 1337 struct nlmsghdr *nlh; 1338 unsigned long id; 1339 u32 index, port = 0; 1340 bool filled = false; 1341 1342 err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1343 nldev_policy, NULL); 1344 /* 1345 * Right now, we are expecting the device index to get res information, 1346 * but it is possible to extend this code to return all devices in 1347 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. 1348 * if it doesn't exist, we will iterate over all devices. 1349 * 1350 * But it is not needed for now. 1351 */ 1352 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1353 return -EINVAL; 1354 1355 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1356 device = ib_device_get_by_index(sock_net(skb->sk), index); 1357 if (!device) 1358 return -EINVAL; 1359 1360 /* 1361 * If no PORT_INDEX is supplied, we will return all QPs from that device 1362 */ 1363 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1364 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1365 if (!rdma_is_port_valid(device, port)) { 1366 ret = -EINVAL; 1367 goto err_index; 1368 } 1369 } 1370 1371 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1372 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1373 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 1374 0, NLM_F_MULTI); 1375 1376 if (fill_nldev_handle(skb, device)) { 1377 ret = -EMSGSIZE; 1378 goto err; 1379 } 1380 1381 table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); 1382 if (!table_attr) { 1383 ret = -EMSGSIZE; 1384 goto err; 1385 } 1386 1387 has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); 1388 1389 rt = &device->res[res_type]; 1390 xa_lock(&rt->xa); 1391 /* 1392 * FIXME: if the skip ahead is something common this loop should 1393 * use xas_for_each & xas_pause to optimize, we can have a lot of 1394 * objects. 1395 */ 1396 xa_for_each(&rt->xa, id, res) { 1397 if (idx < start || !rdma_restrack_get(res)) 1398 goto next; 1399 1400 xa_unlock(&rt->xa); 1401 1402 filled = true; 1403 1404 entry_attr = nla_nest_start_noflag(skb, fe->entry); 1405 if (!entry_attr) { 1406 ret = -EMSGSIZE; 1407 rdma_restrack_put(res); 1408 goto msg_full; 1409 } 1410 1411 ret = fill_func(skb, has_cap_net_admin, res, port); 1412 1413 rdma_restrack_put(res); 1414 1415 if (ret) { 1416 nla_nest_cancel(skb, entry_attr); 1417 if (ret == -EMSGSIZE) 1418 goto msg_full; 1419 if (ret == -EAGAIN) 1420 goto again; 1421 goto res_err; 1422 } 1423 nla_nest_end(skb, entry_attr); 1424 again: xa_lock(&rt->xa); 1425 next: idx++; 1426 } 1427 xa_unlock(&rt->xa); 1428 1429 msg_full: 1430 nla_nest_end(skb, table_attr); 1431 nlmsg_end(skb, nlh); 1432 cb->args[0] = idx; 1433 1434 /* 1435 * No more entries to fill, cancel the message and 1436 * return 0 to mark end of dumpit. 
1437 */ 1438 if (!filled) 1439 goto err; 1440 1441 ib_device_put(device); 1442 return skb->len; 1443 1444 res_err: 1445 nla_nest_cancel(skb, table_attr); 1446 1447 err: 1448 nlmsg_cancel(skb, nlh); 1449 1450 err_index: 1451 ib_device_put(device); 1452 return ret; 1453 } 1454 1455 #define RES_GET_FUNCS(name, type) \ 1456 static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ 1457 struct netlink_callback *cb) \ 1458 { \ 1459 return res_get_common_dumpit(skb, cb, type, \ 1460 fill_res_##name##_entry); \ 1461 } \ 1462 static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ 1463 struct nlmsghdr *nlh, \ 1464 struct netlink_ext_ack *extack) \ 1465 { \ 1466 return res_get_common_doit(skb, nlh, extack, type, \ 1467 fill_res_##name##_entry); \ 1468 } 1469 1470 RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); 1471 RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); 1472 RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); 1473 RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); 1474 RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); 1475 RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); 1476 RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); 1477 RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); 1478 RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); 1479 1480 static LIST_HEAD(link_ops); 1481 static DECLARE_RWSEM(link_ops_rwsem); 1482 1483 static const struct rdma_link_ops *link_ops_get(const char *type) 1484 { 1485 const struct rdma_link_ops *ops; 1486 1487 list_for_each_entry(ops, &link_ops, list) { 1488 if (!strcmp(ops->type, type)) 1489 goto out; 1490 } 1491 ops = NULL; 1492 out: 1493 return ops; 1494 } 1495 1496 void rdma_link_register(struct rdma_link_ops *ops) 1497 { 1498 down_write(&link_ops_rwsem); 1499 if (WARN_ON_ONCE(link_ops_get(ops->type))) 1500 goto out; 1501 list_add(&ops->list, &link_ops); 1502 out: 1503 up_write(&link_ops_rwsem); 1504 } 1505 EXPORT_SYMBOL(rdma_link_register); 1506 1507 void rdma_link_unregister(struct rdma_link_ops *ops) 1508 { 1509 down_write(&link_ops_rwsem); 1510 list_del(&ops->list); 1511 up_write(&link_ops_rwsem); 1512 } 1513 EXPORT_SYMBOL(rdma_link_unregister); 1514 1515 static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 1516 struct netlink_ext_ack *extack) 1517 { 1518 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1519 char ibdev_name[IB_DEVICE_NAME_MAX]; 1520 const struct rdma_link_ops *ops; 1521 char ndev_name[IFNAMSIZ]; 1522 struct net_device *ndev; 1523 char type[IFNAMSIZ]; 1524 int err; 1525 1526 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1527 nldev_policy, extack); 1528 if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || 1529 !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) 1530 return -EINVAL; 1531 1532 nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 1533 sizeof(ibdev_name)); 1534 if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) 1535 return -EINVAL; 1536 1537 nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); 1538 nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], 1539 sizeof(ndev_name)); 1540 1541 ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); 1542 if (!ndev) 1543 return -ENODEV; 1544 1545 down_read(&link_ops_rwsem); 1546 ops = link_ops_get(type); 1547 #ifdef CONFIG_MODULES 1548 if (!ops) { 1549 up_read(&link_ops_rwsem); 1550 request_module("rdma-link-%s", type); 1551 down_read(&link_ops_rwsem); 1552 ops = link_ops_get(type); 1553 } 1554 #endif 1555 err = ops ? 
ops->newlink(ibdev_name, ndev) : -EINVAL; 1556 up_read(&link_ops_rwsem); 1557 dev_put(ndev); 1558 1559 return err; 1560 } 1561 1562 static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 1563 struct netlink_ext_ack *extack) 1564 { 1565 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1566 struct ib_device *device; 1567 u32 index; 1568 int err; 1569 1570 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1571 nldev_policy, extack); 1572 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1573 return -EINVAL; 1574 1575 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1576 device = ib_device_get_by_index(sock_net(skb->sk), index); 1577 if (!device) 1578 return -EINVAL; 1579 1580 if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) { 1581 ib_device_put(device); 1582 return -EINVAL; 1583 } 1584 1585 ib_unregister_device_and_put(device); 1586 return 0; 1587 } 1588 1589 static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, 1590 struct netlink_ext_ack *extack) 1591 { 1592 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1593 char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; 1594 struct ib_client_nl_info data = {}; 1595 struct ib_device *ibdev = NULL; 1596 struct sk_buff *msg; 1597 u32 index; 1598 int err; 1599 1600 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 1601 extack); 1602 if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) 1603 return -EINVAL; 1604 1605 nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], 1606 sizeof(client_name)); 1607 1608 if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { 1609 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1610 ibdev = ib_device_get_by_index(sock_net(skb->sk), index); 1611 if (!ibdev) 1612 return -EINVAL; 1613 1614 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1615 data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1616 if (!rdma_is_port_valid(ibdev, data.port)) { 1617 err = -EINVAL; 1618 goto out_put; 1619 } 1620 } else { 1621 data.port = -1; 1622 } 1623 } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1624 return -EINVAL; 1625 } 1626 1627 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1628 if (!msg) { 1629 err = -ENOMEM; 1630 goto out_put; 1631 } 1632 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1633 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1634 RDMA_NLDEV_CMD_GET_CHARDEV), 1635 0, 0); 1636 1637 data.nl_msg = msg; 1638 err = ib_get_client_nl_info(ibdev, client_name, &data); 1639 if (err) 1640 goto out_nlmsg; 1641 1642 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, 1643 huge_encode_dev(data.cdev->devt), 1644 RDMA_NLDEV_ATTR_PAD); 1645 if (err) 1646 goto out_data; 1647 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, 1648 RDMA_NLDEV_ATTR_PAD); 1649 if (err) 1650 goto out_data; 1651 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, 1652 dev_name(data.cdev))) { 1653 err = -EMSGSIZE; 1654 goto out_data; 1655 } 1656 1657 nlmsg_end(msg, nlh); 1658 put_device(data.cdev); 1659 if (ibdev) 1660 ib_device_put(ibdev); 1661 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1662 1663 out_data: 1664 put_device(data.cdev); 1665 out_nlmsg: 1666 nlmsg_free(msg); 1667 out_put: 1668 if (ibdev) 1669 ib_device_put(ibdev); 1670 return err; 1671 } 1672 1673 static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1674 struct netlink_ext_ack *extack) 1675 { 1676 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1677 struct sk_buff *msg; 1678 int err; 1679 1680 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1681 nldev_policy, extack); 1682 if (err) 1683 
return err; 1684 1685 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1686 if (!msg) 1687 return -ENOMEM; 1688 1689 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1690 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1691 RDMA_NLDEV_CMD_SYS_GET), 1692 0, 0); 1693 1694 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, 1695 (u8)ib_devices_shared_netns); 1696 if (err) { 1697 nlmsg_free(msg); 1698 return err; 1699 } 1700 nlmsg_end(msg, nlh); 1701 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1702 } 1703 1704 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1705 struct netlink_ext_ack *extack) 1706 { 1707 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1708 u8 enable; 1709 int err; 1710 1711 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1712 nldev_policy, extack); 1713 if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) 1714 return -EINVAL; 1715 1716 enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); 1717 /* Only 0 and 1 are supported */ 1718 if (enable > 1) 1719 return -EINVAL; 1720 1721 err = rdma_compatdev_set(enable); 1722 return err; 1723 } 1724 1725 static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1726 struct netlink_ext_ack *extack) 1727 { 1728 u32 index, port, mode, mask = 0, qpn, cntn = 0; 1729 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1730 struct ib_device *device; 1731 struct sk_buff *msg; 1732 int ret; 1733 1734 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1735 nldev_policy, extack); 1736 /* Currently only counter for QP is supported */ 1737 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || 1738 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 1739 !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE]) 1740 return -EINVAL; 1741 1742 if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) 1743 return -EINVAL; 1744 1745 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1746 device = ib_device_get_by_index(sock_net(skb->sk), index); 1747 if (!device) 1748 return -EINVAL; 1749 1750 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1751 if (!rdma_is_port_valid(device, port)) { 1752 ret = -EINVAL; 1753 goto err; 1754 } 1755 1756 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1757 if (!msg) { 1758 ret = -ENOMEM; 1759 goto err; 1760 } 1761 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1762 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1763 RDMA_NLDEV_CMD_STAT_SET), 1764 0, 0); 1765 1766 mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); 1767 if (mode == RDMA_COUNTER_MODE_AUTO) { 1768 if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) 1769 mask = nla_get_u32( 1770 tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); 1771 1772 ret = rdma_counter_set_auto_mode(device, port, 1773 mask ? 
true : false, mask); 1774 if (ret) 1775 goto err_msg; 1776 } else { 1777 if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) 1778 goto err_msg; 1779 qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); 1780 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { 1781 cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); 1782 ret = rdma_counter_bind_qpn(device, port, qpn, cntn); 1783 } else { 1784 ret = rdma_counter_bind_qpn_alloc(device, port, 1785 qpn, &cntn); 1786 } 1787 if (ret) 1788 goto err_msg; 1789 1790 if (fill_nldev_handle(msg, device) || 1791 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 1792 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || 1793 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { 1794 ret = -EMSGSIZE; 1795 goto err_fill; 1796 } 1797 } 1798 1799 nlmsg_end(msg, nlh); 1800 ib_device_put(device); 1801 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1802 1803 err_fill: 1804 rdma_counter_unbind_qpn(device, port, qpn, cntn); 1805 err_msg: 1806 nlmsg_free(msg); 1807 err: 1808 ib_device_put(device); 1809 return ret; 1810 } 1811 1812 static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1813 struct netlink_ext_ack *extack) 1814 { 1815 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1816 struct ib_device *device; 1817 struct sk_buff *msg; 1818 u32 index, port, qpn, cntn; 1819 int ret; 1820 1821 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1822 nldev_policy, extack); 1823 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || 1824 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || 1825 !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] || 1826 !tb[RDMA_NLDEV_ATTR_RES_LQPN]) 1827 return -EINVAL; 1828 1829 if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) 1830 return -EINVAL; 1831 1832 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1833 device = ib_device_get_by_index(sock_net(skb->sk), index); 1834 if (!device) 1835 return -EINVAL; 1836 1837 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1838 if (!rdma_is_port_valid(device, port)) { 1839 ret = -EINVAL; 1840 goto err; 1841 } 1842 1843 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1844 if (!msg) { 1845 ret = -ENOMEM; 1846 goto err; 1847 } 1848 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1849 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1850 RDMA_NLDEV_CMD_STAT_SET), 1851 0, 0); 1852 1853 cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); 1854 qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); 1855 if (fill_nldev_handle(msg, device) || 1856 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 1857 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || 1858 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { 1859 ret = -EMSGSIZE; 1860 goto err_fill; 1861 } 1862 1863 ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); 1864 if (ret) 1865 goto err_fill; 1866 1867 nlmsg_end(msg, nlh); 1868 ib_device_put(device); 1869 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1870 1871 err_fill: 1872 nlmsg_free(msg); 1873 err: 1874 ib_device_put(device); 1875 return ret; 1876 } 1877 1878 static int stat_get_doit_default_counter(struct sk_buff *skb, 1879 struct nlmsghdr *nlh, 1880 struct netlink_ext_ack *extack, 1881 struct nlattr *tb[]) 1882 { 1883 struct rdma_hw_stats *stats; 1884 struct nlattr *table_attr; 1885 struct ib_device *device; 1886 int ret, num_cnts, i; 1887 struct sk_buff *msg; 1888 u32 index, port; 1889 u64 v; 1890 1891 if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 1892 return -EINVAL; 1893 1894 index = 
nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1895 device = ib_device_get_by_index(sock_net(skb->sk), index); 1896 if (!device) 1897 return -EINVAL; 1898 1899 if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) { 1900 ret = -EINVAL; 1901 goto err; 1902 } 1903 1904 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1905 if (!rdma_is_port_valid(device, port)) { 1906 ret = -EINVAL; 1907 goto err; 1908 } 1909 1910 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1911 if (!msg) { 1912 ret = -ENOMEM; 1913 goto err; 1914 } 1915 1916 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1917 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1918 RDMA_NLDEV_CMD_STAT_GET), 1919 0, 0); 1920 1921 if (fill_nldev_handle(msg, device) || 1922 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { 1923 ret = -EMSGSIZE; 1924 goto err_msg; 1925 } 1926 1927 stats = device->port_data ? device->port_data[port].hw_stats : NULL; 1928 if (stats == NULL) { 1929 ret = -EINVAL; 1930 goto err_msg; 1931 } 1932 mutex_lock(&stats->lock); 1933 1934 num_cnts = device->ops.get_hw_stats(device, stats, port, 0); 1935 if (num_cnts < 0) { 1936 ret = -EINVAL; 1937 goto err_stats; 1938 } 1939 1940 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 1941 if (!table_attr) { 1942 ret = -EMSGSIZE; 1943 goto err_stats; 1944 } 1945 for (i = 0; i < num_cnts; i++) { 1946 v = stats->value[i] + 1947 rdma_counter_get_hwstat_value(device, port, i); 1948 if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) { 1949 ret = -EMSGSIZE; 1950 goto err_table; 1951 } 1952 } 1953 nla_nest_end(msg, table_attr); 1954 1955 mutex_unlock(&stats->lock); 1956 nlmsg_end(msg, nlh); 1957 ib_device_put(device); 1958 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1959 1960 err_table: 1961 nla_nest_cancel(msg, table_attr); 1962 err_stats: 1963 mutex_unlock(&stats->lock); 1964 err_msg: 1965 nlmsg_free(msg); 1966 err: 1967 ib_device_put(device); 1968 return ret; 1969 } 1970 1971 static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, 1972 struct netlink_ext_ack *extack, struct nlattr *tb[]) 1973 1974 { 1975 static enum rdma_nl_counter_mode mode; 1976 static enum rdma_nl_counter_mask mask; 1977 struct ib_device *device; 1978 struct sk_buff *msg; 1979 u32 index, port; 1980 int ret; 1981 1982 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) 1983 return nldev_res_get_counter_doit(skb, nlh, extack); 1984 1985 if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || 1986 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 1987 return -EINVAL; 1988 1989 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1990 device = ib_device_get_by_index(sock_net(skb->sk), index); 1991 if (!device) 1992 return -EINVAL; 1993 1994 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1995 if (!rdma_is_port_valid(device, port)) { 1996 ret = -EINVAL; 1997 goto err; 1998 } 1999 2000 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2001 if (!msg) { 2002 ret = -ENOMEM; 2003 goto err; 2004 } 2005 2006 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2007 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 2008 RDMA_NLDEV_CMD_STAT_GET), 2009 0, 0); 2010 2011 ret = rdma_counter_get_mode(device, port, &mode, &mask); 2012 if (ret) 2013 goto err_msg; 2014 2015 if (fill_nldev_handle(msg, device) || 2016 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 2017 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { 2018 ret = -EMSGSIZE; 2019 goto err_msg; 2020 } 2021 2022 if ((mode == RDMA_COUNTER_MODE_AUTO) && 2023 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, 
static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack, struct nlattr *tb[])
{
	enum rdma_nl_counter_mode mode;
	enum rdma_nl_counter_mask mask;
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
		return nldev_res_get_counter_doit(skb, nlh, extack);

	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	ret = rdma_counter_get_mode(device, port, &mode, &mask);
	if (ret)
		goto err_msg;

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret)
		return -EINVAL;

	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
		return stat_get_doit_default_counter(skb, nlh, extack, tb);

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = stat_get_doit_qp(skb, nlh, extack, tb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
					  fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int nldev_stat_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
		return -EINVAL;

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = nldev_res_get_counter_dumpit(skb, cb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
					    fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
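
/*
 * Operation table registered with the RDMA netlink core in nldev_init()
 * below: .doit serves single-shot requests, .dump serves multi-part
 * dumps, and entries flagged RDMA_NL_ADMIN_PERM are restricted to
 * CAP_NET_ADMIN capable callers. Userspace (typically iproute2's
 * rdma(8) tool) reaches these handlers over the RDMA_NL_NLDEV client.
 */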
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
		.doit = nldev_res_get_qp_raw_doit,
		.dump = nldev_res_get_qp_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
		.doit = nldev_res_get_cq_raw_doit,
		.dump = nldev_res_get_cq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
		.doit = nldev_res_get_mr_raw_doit,
		.dump = nldev_res_get_mr_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);