#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u16 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
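	/*
	 * For example (an illustrative sketch only; "ibp" is assumed to be
	 * the driver's pointer to this struct rvt_ibport), a driver's RC
	 * retry path might simply do:
	 *
	 *	ibp->n_rc_resends++;
	 */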
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per-CPU counters to avoid cacheline trading on update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv;		/* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt.
	 * However, rdmavt will need access to it as well, so drivers must
	 * provide it during the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;
};

#define RVT_CQN_MAX 16 /* maximum length of cq name */

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props,
	 * for instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	char cq_name[RVT_CQN_MAX];
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 reserved_operations;
};
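/*
 * A minimal sketch of a driver filling in its rvt_driver_params before
 * registration. The driver name "xdd", the "unit" variable, and all
 * values are hypothetical; "rdi" is assumed to be the driver's
 * struct rvt_dev_info pointer. Real drivers derive these from module
 * parameters and hardware limits:
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.lkey_table_size = 16;
 *	rdi->dparms.nports = 1;
 *	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;
 *	snprintf(rdi->dparms.cq_name, sizeof(rdi->dparms.cq_name),
 *		 "xdd_cq%d", unit);
 */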
/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver a notice that there is send work to do. It is up to
	 * the driver to generally push the packets out; this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires the s_lock not to be held. The other assumes the
	 * s_lock is held.
	 */
	void (*schedule_send)(struct rvt_qp *qp);
	void (*schedule_send_no_lock)(struct rvt_qp *qp);

	/* Driver specific work request checking */
	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this callback.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/* Passed to ib core registration. Callback to create sysfs files */
	int (*port_callback)(struct ib_device *, u8, struct kobject *);

	/*
	 * Returns a string to represent the device being registered. This is
	 * primarily used for error and debug messages on the console.
	 */
	const char * (*get_card_name)(struct rvt_dev_info *rdi);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt. Errors are returned via
	 * ERR_PTR(err). The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				gfp_t gfp);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver the particular qp in question has been reset so
	 * that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests
	 * that are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in-progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);
	/*
	 * Convert an mtu to a path mtu.
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order.
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shutdown a port.
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use of
	 * these is checked for NULL before blindly calling. Rdmavt should also
	 * be functional if drivers omit these.
	 */
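	/*
	 * In other words, rdmavt-side callers follow the pattern below for
	 * every member from here down (an illustrative sketch, not a literal
	 * excerpt from rdmavt):
	 *
	 *	if (rdi->driver_f.check_modify_qp)
	 *		ret = rdi->driver_f.check_modify_qp(qp, attr,
	 *						    attr_mask, udata);
	 */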
	/* Called to inform the driver that all qps should now be freed */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num, gfp_t gfp);

	/* Determine if it's safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
};
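/*
 * A hedged sketch of a driver wiring up its calldowns at init time; the
 * "xdd_*" functions are hypothetical stand-ins for whatever the driver
 * actually implements, and optional calldowns may simply be left NULL:
 *
 *	rdi->driver_f.schedule_send = xdd_schedule_send;
 *	rdi->driver_f.do_send = xdd_do_send;
 *	rdi->driver_f.qp_priv_alloc = xdd_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = xdd_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = xdd_notify_qp_reset;
 */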
struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible
	 * for allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members
	 * of dparms.props. The driver needs to fill in dparms exactly as it
	 * would want values reported to a ULP. This will be returned to the
	 * caller in rdmavt's device. The driver should therefore refrain
	 * from modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	struct kthread_worker *worker; /* per device cq worker */
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;	/* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have the same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic + 1;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}

/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);
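/*
 * A hedged sketch of the overall registration flow using the calls above.
 * The "xdd" driver, its devdata layout (with struct rvt_dev_info "rdi"
 * embedded as the first member, and per-port structs embedding a
 * struct rvt_ibport "rvp"), and the error handling are all hypothetical:
 *
 *	struct xdd_devdata *dd;
 *
 *	dd = (struct xdd_devdata *)rvt_alloc_device(sizeof(*dd), nports);
 *	if (!dd)
 *		return -ENOMEM;
 *	...fill dd->rdi.dparms, dd->rdi.driver_f and each port...
 *	rvt_init_port(&dd->rdi, &port->rvp, port_index, port->pkey_table);
 *	ret = rvt_register_device(&dd->rdi);
 *	if (ret)
 *		rvt_dealloc_device(&dd->rdi);
 */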
#endif          /* DEF_RDMA_VT_H */