/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC	16
#define QIB_GUIDS_PER_PORT	5

#define QPN_MAX		(1 << 24)
#define QPNMAP_ENTRIES	(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
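/*
 * Illustrative note: the QPN allocation bitmap uses one bit per QP number
 * and one page of bitmap per struct qpn_map entry, so QPNMAP_ENTRIES works
 * out to QPN_MAX / (PAGE_SIZE * BITS_PER_BYTE) pages in total.
 */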
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION 2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK		0x01
#define QIB_POST_RECV_OK		0x02
#define QIB_PROCESS_RECV_OK		0x04
#define QIB_PROCESS_SEND_OK		0x08
#define QIB_PROCESS_NEXT_SEND_OK	0x10
#define QIB_FLUSH_SEND			0x20
#define QIB_FLUSH_RECV			0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK		(1 << 31)
#define IB_BTH_SOLICITED	(1 << 23)
#define IB_BTH_MIG_REQ		(1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP	(1 << 26)

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0	1
#define IB_VL_VL0_1	2
#define IB_VL_VL0_3	3
#define IB_VL_VL0_7	4
#define IB_VL_VL0_14	5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
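/*
 * Illustrative breakdown of the 8+40+12+8 figure above: LRH = 4 * 2 = 8
 * bytes, GRH = sizeof(struct ib_grh) = 40 bytes, BTH = 3 * 4 = 12 bytes
 * and DETH = 2 * 4 = 8 bytes, i.e. 68 bytes total, or 72 with the
 * optional 4-byte immediate data.
 */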
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __attribute__ ((packed));

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;	/* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;	/* index of next entry to fill */
	u32 tail;	/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
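/*
 * Illustrative note: with ibcq.cqe + 1 slots the ring is empty when
 * head == tail, and is considered full when advancing head by one
 * (wrapping at ibcq.cqe + 1) would make it equal to tail.
 */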
/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;
	spinlock_t lock;	/* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ	(PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of qib_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	u8  page_shift;		/* 0 - non-uniform or non power-of-2 sizes */
	atomic_t refcount;
	struct qib_segarray *map[0];	/* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};
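/*
 * Illustrative note: segment i of a region is reached as
 * mr->map[i / QIB_SEGSZ]->segs[i % QIB_SEGSZ]; the m and n fields of
 * struct qib_sge track that (map, segment) pair while data is copied.
 */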
/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;	/* new work requests posted to the head */
	u32 tail;	/* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	spinlock_t lock;	/* protect changes in this struct */
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;	/* next SGE to be used if any */
	struct qib_sge sge;		/* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	struct qib_qp *next;		/* link list for QPN hash table */
	struct qib_qp *timer_next;	/* link list for qib_ib_timer() */
	struct list_head iowait;	/* link for wait PIO buf */
	struct list_head rspwait;	/* link for waiting to respond */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct timer_list s_timer;
	struct work_struct s_work;
	struct qib_mmap_info *ip;
	struct qib_sge_state *s_cur_sge;
	struct qib_verbs_txreq *s_tx;
	struct qib_mregion *s_rdma_mr;
	struct qib_sge_state s_sge;	/* current send request data */
	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
	struct qib_sge_state s_ack_rdma_sge;
	struct qib_sge_state s_rdma_read_sge;
	struct qib_sge_state r_sge;	/* current receive data */
	spinlock_t r_lock;	/* used for APM */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u32 s_flags;
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_srate;
	u8 s_draining;
	u8 s_mig_state;
	u8 timeout;		/* Timeout for this QP */
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 port_num;
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct qib_swqe *s_wq;	/* send work queue */
	struct qib_swqe *s_wqe;
	struct qib_rq r_rq;		/* receive work queue */
	struct qib_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID	0
#define QIB_R_REWIND_SGE	1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE	0x01
#define QIB_R_RDMAR_SEQ	0x02
#define QIB_R_RSP_NAK	0x04
#define QIB_R_RSP_SEND	0x08
#define QIB_R_COMM_EST	0x10
/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR	0x0001
#define QIB_S_BUSY		0x0002
#define QIB_S_TIMER		0x0004
#define QIB_S_RESP_PENDING	0x0008
#define QIB_S_ACK_PENDING	0x0010
#define QIB_S_WAIT_FENCE	0x0020
#define QIB_S_WAIT_RDMAR	0x0040
#define QIB_S_WAIT_RNR		0x0080
#define QIB_S_WAIT_SSN_CREDIT	0x0100
#define QIB_S_WAIT_DMA		0x0200
#define QIB_S_WAIT_PIO		0x0400
#define QIB_S_WAIT_TX		0x0800
#define QIB_S_WAIT_DMA_DESC	0x1000
#define QIB_S_WAIT_KMEM		0x2000
#define QIB_S_WAIT_PSN		0x4000
#define QIB_S_WAIT_ACK		0x8000
#define QIB_S_SEND_ONE		0x10000
#define QIB_S_UNLIMITED_CREDIT	0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT	16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				   (sizeof(struct qib_swqe) +
				    qp->s_max_sge *
				    sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
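/*
 * Illustrative example only: because both queues hold variable-sized
 * entries they must be walked with the helpers above, e.g.
 *
 *	u32 n = qp->s_last;
 *
 *	while (n != qp->s_head) {
 *		struct qib_swqe *wqe = get_swqe_ptr(qp, n);
 *
 *		... examine wqe->wr, wqe->psn, wqe->length ...
 *		if (++n >= qp->s_size)
 *			n = 0;
 *	}
 *
 * and similarly get_rwqe_ptr(&qp->r_rq, n) for receive entries.
 */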
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock;	/* protect changes in this struct */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
	spinlock_t lock;	/* protect changes in this struct */
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct qib_mregion **table;
};

struct qib_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct qib_ibport {
	struct qib_qp *qp0;
	struct qib_qp *qp1;
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;	/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];	/* writable GUIDs */
	u64 tid;		/* TID for traps */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
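	/*
	 * Illustrative note: the z_* fields below hold the counter values
	 * sampled when a PMA counter was last cleared; the PMA code is
	 * assumed to report the current hardware count minus this baseline
	 * rather than resetting the hardware counters themselves.
	 */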
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

	struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;	/* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;	/* list for wait PIO buf */
	struct list_head dmawait;	/* list for wait DMA */
	struct list_head txwait;	/* list for wait qib_verbs_txreq */
	struct list_head memwait;	/* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock;	/* protect wait lists, PMA counters, etc. */
	unsigned qp_table_size;		/* size of the hash table */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated;	/* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * a RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_wq;
extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
static inline void qib_schedule_send(struct qib_qp *qp)
{
	if (qib_send_ok(qp))
		queue_work(qib_wq, &qp->s_work);
}

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
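/*
 * Illustrative examples only: qib_pkey_ok(0x8001, 0x0001) and
 * qib_pkey_ok(0x8001, 0x8001) are non-zero (matching low 15 bits and at
 * least one full member), while qib_pkey_ok(0x0001, 0x0001) is zero
 * (two limited members) and qib_pkey_ok(0x8001, 0x8002) is zero
 * (low 15 bits differ).
 */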
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
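/*
 * Illustrative example only: PSNs are 24-bit circular values, so
 * qib_cmp24(5, 9) is negative while qib_cmp24(0, 0xFFFFFF) is positive
 * (0 follows 0xFFFFFF across the wrap); the << 8 moves bit 23 of the
 * difference into the sign bit so ordering is evaluated modulo 2^24.
 */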
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);

int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP			1
#define IB_PHYSPORTSTATE_POLL			2
#define IB_PHYSPORTSTATE_DISABLED		3
#define IB_PHYSPORTSTATE_CFG_TRAIN		4
#define IB_PHYSPORTSTATE_LINKUP			5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER	6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE		8
#define IB_PHYSPORTSTATE_CFG_IDLE		0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN	0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT	0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE		0xF
#define IB_PHYSPORTSTATE_CFG_ENH		0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH		0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;	/* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif				/* QIB_VERBS_H */