/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
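/*
 * The send queue reserves headroom beyond the requested depth so that
 * hardware prefetch of not-yet-posted WQEs stays harmless.  With the
 * minimum WQE shift of 6 (64-byte stride), the spare count works out
 * to MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33 WQEs.
 */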
enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))

struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct mlx4_uar		uar;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;
};

struct mlx4_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

struct mlx4_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
};

struct mlx4_ib_cq_buf {
	struct mlx4_buf		buf;
	struct mlx4_mtt		mtt;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;
	int			cqe;
};

struct mlx4_ib_cq {
	struct ib_cq		ibcq;
	struct mlx4_cq		mcq;
	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mlx4_db		db;
	spinlock_t		lock;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;
};

struct mlx4_ib_mr {
	struct ib_mr		ibmr;
	struct mlx4_mr		mmr;
	struct ib_umem	       *umem;
};

struct mlx4_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct mlx4_ib_fmr {
	struct ib_fmr		ibfmr;
	struct mlx4_fmr		mfmr;
};

struct mlx4_ib_wq {
	u64		       *wrid;
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO				= 1 << 0,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
};

struct mlx4_ib_gid_entry {
	struct list_head	list;
	union ib_gid		gid;
	int			added;
	u8			port;
};

struct mlx4_ib_qp {
	struct ib_qp		ibqp;
	struct mlx4_qp		mqp;
	struct mlx4_buf		buf;

	struct mlx4_db		db;
	struct mlx4_ib_wq	rq;

	u32			doorbell_qpn;
	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx4_ib_wq	sq;

	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	int			buf_size;
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			sq_no_prefetch;
	u8			state;
	int			mlx_type;
	struct list_head	gid_list;
	struct list_head	steering_rules;
};

struct mlx4_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx4_srq		msrq;
	struct mlx4_buf		buf;
	struct mlx4_db		db;
	u64		       *wrid;
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	struct mutex		mutex;
};

struct mlx4_ib_ah {
	struct ib_ah		ibah;
	union mlx4_ext_av	av;
};

struct mlx4_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	union ib_gid		gid_table[MLX4_MAX_PORTS][128];
};

struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	int			num_ports;
	void __iomem	       *uar_map;

	struct mlx4_uar		priv_uar;
	u32			priv_pdn;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	spinlock_t		sm_lock;

	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
	int			counters[MLX4_MAX_PORTS];
	int		       *eq_table;
	int			eq_added;
};

struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *ib_dev;
	struct mlx4_eqe		ib_eqe;
};
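/*
 * Conversion helpers: each mlx4_ib object embeds its ib_verbs
 * counterpart, so a pointer handed out to the core (e.g. the ib_cq
 * seen by mlx4_ib_poll_cq()) can be mapped back to the enclosing
 * driver structure with container_of().
 */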
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}
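/*
 * Prototypes shared between the mlx4_ib source files (main.c, cq.c,
 * qp.c, srq.c, mr.c, ah.c, mad.c and doorbell.c).
 */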
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
				 struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
			 u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);

int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
			u8 *mac, int *is_mcast, u8 port);

static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	/* RoCE packets always carry a GRH */
	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return 1;

	/* on IB, bit 7 of g_slid flags the presence of a GRH */
	return !!(ah->av.ib.g_slid & 0x80);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type);

#endif /* MLX4_IB_H */