/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include "mad_priv.h"

struct pkey_index_qp_list {
	struct list_head pkey_index_list;
	u16 pkey_index;
	/* Lock to hold while iterating the qp_list. */
	spinlock_t qp_list_lock;
	struct list_head qp_list;
};
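/*
 * Usage sketch, not part of this header: qp_list is only walked while
 * qp_list_lock is held.  The entry type is assumed here to be
 * struct ib_port_pkey (linked via its qp_list member, as used by the
 * IB security code), and handle_entry() is a hypothetical placeholder:
 *
 *	struct ib_port_pkey *pp;
 *
 *	spin_lock(&pkey->qp_list_lock);
 *	list_for_each_entry(pp, &pkey->qp_list, qp_list)
 *		handle_entry(pp);
 *	spin_unlock(&pkey->qp_list_lock);
 */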
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

void ib_cache_setup(void);
void ib_cache_cleanup(void);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				     struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
				  struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);

typedef int (*nldev_callback)(struct ib_device *device,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb);

enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
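/*
 * Illustrative sketch, not part of this header: enumerating the
 * net_devices associated with a RoCE device via ib_enum_roce_netdev().
 * The filter and callback names (any_netdev, count_netdev) are
 * hypothetical; a nonzero filter return selects the netdev, and the
 * callback then runs for each selected one:
 *
 *	static int any_netdev(struct ib_device *device, u8 port,
 *			      struct net_device *idev, void *cookie)
 *	{
 *		return 1;
 *	}
 *
 *	static void count_netdev(struct ib_device *device, u8 port,
 *				 struct net_device *idev, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	ib_enum_roce_netdev(ib_dev, any_netdev, NULL, count_netdev, &n);
 */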
#ifdef CONFIG_CGROUP_RDMA
int ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index);
#else
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
#endif
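/*
 * Pairing sketch, not part of this header: a successful
 * ib_rdmacg_try_charge() must be balanced by an ib_rdmacg_uncharge()
 * with the same resource type on teardown or on a later failure path.
 * cg_obj and device are assumed to come from the caller's context, and
 * RDMACG_RESOURCE_HCA_OBJECT is taken from <linux/cgroup_rdma.h>:
 *
 *	ret = ib_rdmacg_try_charge(&cg_obj, device,
 *				   RDMACG_RESOURCE_HCA_OBJECT);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_rdmacg_uncharge(&cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
 */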
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int rdma_nl_init(void);
void rdma_nl_exit(void);

/**
 * Check if there are any listeners to the netlink group
 * @group: the netlink group ID
 * Returns 0 if there are listeners to the group, or a negative value
 * if there are none.
 */
int ibnl_chk_listeners(unsigned int group);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx);

#ifdef CONFIG_SECURITY_INFINIBAND
int ib_security_pkey_access(struct ib_device *dev,
			    u8 port_num,
			    u16 pkey_index,
			    void *sec);

void ib_security_destroy_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix);

int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata);

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
static inline int ib_security_pkey_access(struct ib_device *dev,
					  u8 port_num,
					  u16 pkey_index,
					  void *sec)
{
	return 0;
}

static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->modify_qp(qp->real_qp,
				     qp_attr,
				     qp_attr_mask,
				     udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}
#endif

struct ib_device *__ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
#endif /* _CORE_PRIV_H */