/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <ib_verbs.h>
#include <ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
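/*
 * Illustrative sketch (not part of the original header): these MPT
 * flags mirror the ib_verbs access flags, so a translation helper
 * could plausibly look like the following.  The name
 * example_convert_access() is hypothetical, shown only to make the
 * bit mapping concrete; local read access is assumed to be always
 * permitted.
 */
#if 0	/* example only */
static inline u32 example_convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}
#endif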

struct mthca_buf_list {
	void *buf;			/* kernel virtual address of this chunk */
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* DMA address, kept for later unmapping */
};

struct mthca_uar {
	unsigned long pfn;	/* physical page frame of the UAR */
	int           index;	/* index of this UAR within the HCA */
};

struct mthca_mr {
	struct ib_mr ibmr;
	int order;
	u32 first_seg;
};

struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;
	struct mthca_mr ntmr;
};

struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;
	u32                    eqn_mask;
	u32                    cons_index;
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
};

struct mthca_av;

enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* address vector stored in the HCA's own memory */
	MTHCA_AH_PCI_POOL,	/* address vector allocated from a PCI DMA pool */
	MTHCA_AH_KMALLOC	/* kmalloc'ed address vector, DMA-mapped directly */
};

struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct mthca_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference is also good for access through the consumer API, so
 * modifying the CQ/QP etc. doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * (Both of these paths are sketched in code just below this comment.)
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
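
/*
 * Illustrative sketch of the access-on-completion and destroy paths
 * described above.  This block is not part of the original header:
 * struct example_cq_table and its "lock"/"cq" fields are hypothetical
 * stand-ins for the real dev->cq_table, used only to make the lock
 * ordering and refcounting concrete.
 */
#if 0	/* example only */
struct example_cq_table {
	spinlock_t       lock;			/* the one global table lock */
	struct mthca_cq *cq[1];			/* hypothetical: indexed by CQN */
};

static void example_cq_event(struct example_cq_table *table, int cqn)
{
	struct mthca_cq *cq;

	spin_lock(&table->lock);		/* 1. lock table and look up struct */
	cq = table->cq[cqn];
	if (cq)
		atomic_inc(&cq->refcount);	/* 2. pin the struct... */
	spin_unlock(&table->lock);		/* 3. ...before dropping table lock */

	if (!cq)
		return;

	spin_lock(&cq->lock);			/* 4. per-struct lock: do the work */
	/* ... process the completion event ... */
	spin_unlock(&cq->lock);

	if (atomic_dec_and_test(&cq->refcount))	/* 5. drop ref; if zero, wake waiters */
		wake_up(&cq->wait);
}

static void example_cq_destroy(struct example_cq_table *table,
			       struct mthca_cq *cq, int cqn)
{
	spin_lock(&table->lock);
	table->cq[cqn] = NULL;			/* remove the table's pointer... */
	spin_unlock(&table->lock);

	atomic_dec(&cq->refcount);		/* ...and drop its reference */
	wait_event(cq->wait, !atomic_read(&cq->refcount));
	/* no more users: safe to tear down the CQ */
}
#endif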

struct mthca_cq {
	struct ib_cq           ibcq;
	spinlock_t             lock;
	atomic_t               refcount;
	int                    cqn;
	u32                    cons_index;
	int                    is_direct;

	/* Next fields are Arbel only */
	int                    set_ci_db_index;
	u32                   *set_ci_db;
	int                    arm_db_index;
	u32                   *arm_db;
	int                    arm_sn;

	union {
		struct mthca_buf_list direct;
		struct mthca_buf_list *page_list;
	}                      queue;
	struct mthca_mr        mr;
	wait_queue_head_t      wait;
};

struct mthca_wq {
	spinlock_t lock;
	int        max;
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;
	int        max_gs;
	int        wqe_shift;

	int        db_index;	/* Arbel only */
	u32       *db;
};

struct mthca_qp {
	struct ib_qp           ibqp;
	atomic_t               refcount;
	u32                    qpn;
	int                    is_direct;
	u8                     transport;
	u8                     state;
	u8                     atomic_rd_en;
	u8                     resp_depth;

	struct mthca_mr        mr;

	struct mthca_wq        rq;
	struct mthca_wq        sq;
	enum ib_sig_type       sq_policy;
	int                    send_wqe_offset;

	u64                   *wrid;
	union {
		struct mthca_buf_list direct;
		struct mthca_buf_list *page_list;
	}                      queue;

	wait_queue_head_t      wait;
};
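
/*
 * Illustrative sketch (not part of the original header): given
 * is_direct, the queue union, and wqe_shift above, receive WQE n
 * could plausibly be located as follows, assuming each WQE is
 * (1 << wqe_shift) bytes and that an indirect queue is built from
 * PAGE_SIZE chunks.  example_get_recv_wqe() is a hypothetical name.
 */
#if 0	/* example only */
static void *example_get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		/* one contiguous buffer: simple shift to the nth WQE */
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		/* chunked buffer: pick the page, then the offset within it */
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
#endif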

struct mthca_sqp {
	struct mthca_qp qp;
	int             port;
	int             pkey_index;
	u32             qkey;
	u32             send_psn;
	struct ib_ud_header ud_header;
	int             header_buf_size;
	void           *header_buf;
	dma_addr_t      header_dma;
};

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	return container_of(qp, struct mthca_sqp, qp);
}
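
/*
 * Usage sketch (not part of the original header): verbs entry points
 * receive the generic ib_* object and recover the driver-private
 * structure with the helpers above.  example_poll_one() is a
 * hypothetical function shown only to illustrate the pattern.
 */
#if 0	/* example only */
static int example_poll_one(struct ib_cq *ibcq)
{
	struct mthca_cq *cq = to_mcq(ibcq);	/* generic CQ -> driver CQ */
	int npolled = 0;

	spin_lock(&cq->lock);
	/* ... consume completions for cq->cqn, bumping cq->cons_index ... */
	spin_unlock(&cq->lock);

	return npolled;
}
#endif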

#endif /* MTHCA_PROVIDER_H */