#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Structure that low level drivers will populate in order to register with
 * the rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */

struct trap_list {
	u32 list_len;
	struct list_head list;
};

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per CPU counters to avoid cacheline trading to update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv; /* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to it before registering with rdmavt. However,
	 * rdmavt will also need access to it, so drivers must provide it
	 * during the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;

	/*
	 * Keep a list of traps that have not been repressed. They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};
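/*
 * Illustrative sketch only (not part of this API): a driver would
 * typically allocate one rvt_ibport per port, together with the pkey
 * table it owns, and hand both to rdmavt via rvt_init_port() (declared
 * at the bottom of this header). The "my_" names below are hypothetical
 * and error unwinding is omitted for brevity.
 *
 *	static int my_setup_port(struct rvt_dev_info *rdi, int port_index)
 *	{
 *		struct rvt_ibport *ibp;
 *		u16 *pkeys;
 *
 *		ibp = kzalloc(sizeof(*ibp), GFP_KERNEL);
 *		pkeys = kcalloc(RVT_MAX_PKEY_VALUES, sizeof(*pkeys),
 *				GFP_KERNEL);
 *		if (!ibp || !pkeys)
 *			return -ENOMEM;
 *		pkeys[0] = 0xffff;	// default full-membership pkey
 *		return rvt_init_port(rdi, ibp, port_index, pkeys);
 *	}
 */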
#define RVT_CQN_MAX 16 /* maximum length of cq name */

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props, for
	 * instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	char cq_name[RVT_CQN_MAX];
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 reserved_operations;
};
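/*
 * Illustrative sketch only (not part of this API): dparms is usually
 * filled from module parameters before registration. The values and the
 * "unit" variable below are made up for the example.
 *
 *	rdi->dparms.qp_table_size = 256;	// hashed QP lookup table
 *	rdi->dparms.qpn_start = 0;
 *	rdi->dparms.qpn_inc = 1;
 *	rdi->dparms.nports = 1;
 *	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;
 *	rdi->dparms.node = NUMA_NO_NODE;	// or the device's NUMA node
 *	snprintf(rdi->dparms.cq_name, sizeof(rdi->dparms.cq_name),
 *		 "my_cq%d", unit);
 */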
/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Notify the driver that there is send work to do. It is up to
	 * the driver to actually push the packets out; this just queues
	 * the work with the driver. There are two variants: the no_lock
	 * version requires that the s_lock not be held, while the other
	 * assumes the s_lock is held.
	 */
	void (*schedule_send)(struct rvt_qp *qp);
	void (*schedule_send_no_lock)(struct rvt_qp *qp);

	/* Driver specific work request checking */
	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this callback.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/* Passed to ib core registration. Callback to create sysfs files */
	int (*port_callback)(struct ib_device *, u8, struct kobject *);

	/*
	 * Returns a string representing the device being registered. This is
	 * primarily used for error and debug messages on the console.
	 */
	const char * (*get_card_name)(struct rvt_dev_info *rdi);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt. Errors are returned via
	 * ERR_PTR(err). The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver that the particular qp in question has been reset
	 * so that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests
	 * that are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in-progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver that a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);

	/*
	 * Convert an mtu to a path mtu.
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order.
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shut down a port.
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use
	 * of these is checked for NULL before blindly calling. Rdmavt should
	 * also be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num);

	/* Determine if it's safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
};
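/*
 * Illustrative sketch only (not part of this API): before registering,
 * a driver points driver_f at its implementations; rdmavt's
 * check_support() verifies that the required ones are present, and the
 * optional ones may be left NULL. The "my_" functions are hypothetical.
 *
 *	rdi->driver_f.schedule_send = my_schedule_send;
 *	rdi->driver_f.schedule_send_no_lock = my_schedule_send_no_lock;
 *	rdi->driver_f.do_send = my_do_send;
 *	rdi->driver_f.qp_priv_alloc = my_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = my_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = my_notify_qp_reset;
 *	rdi->driver_f.get_card_name = my_get_card_name;
 *	rdi->driver_f.get_pci_dev = my_get_pci_dev;
 *	rdi->driver_f.check_ah = NULL;	// optional, may be omitted
 */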
struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration, the driver is responsible for
	 * allocating space for this structure.
	 *
	 * The driver is also responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it
	 * would want values reported to a ULP. This will be returned to the
	 * caller in rdmavt's device. The driver should therefore refrain
	 * from modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	struct kthread_worker *worker; /* per device cq worker */
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;	/* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

};

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have the same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic + 1;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}
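/*
 * Illustrative sketch only (not part of this API): just as ibdev must
 * stay first in rvt_dev_info, a driver can embed rvt_dev_info first in
 * its own device structure, allocate the whole thing with
 * rvt_alloc_device(), and recover its private structure with
 * container_of(). "my_devdata" and "rdi_to_my" are hypothetical names.
 *
 *	struct my_devdata {
 *		struct rvt_dev_info rdi;	// must stay first
 *		int my_private_state;
 *	};
 *
 *	static inline struct my_devdata *rdi_to_my(struct rvt_dev_info *rdi)
 *	{
 *		return container_of(rdi, struct my_devdata, rdi);
 *	}
 *
 *	// Allocation and registration, with dparms and driver_f filled
 *	// in as sketched earlier in this file:
 *	struct rvt_dev_info *rdi;
 *	int ret;
 *
 *	rdi = rvt_alloc_device(sizeof(struct my_devdata), 1);
 *	if (!rdi)
 *		return -ENOMEM;
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		rvt_dealloc_device(rdi);
 */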
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}

/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);

#endif /* DEF_RDMA_VT_H */