/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av,
			 struct rdma_ah_attr *attr,
			 struct ib_gid_attr *sgid_attr,
			 union ib_gid *sgid);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector, struct ib_udata *udata);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_ucontext *context,
		     struct ib_udata *udata);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
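/*
 * Tracks a kernel queue buffer that user space maps with mmap();
 * entries wait on the device's pending_mmaps list until the matching
 * mmap() call arrives, and 'ref' keeps the buffer alive while mapped.
 */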
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
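/* direction of a copy, relative to the memory object being accessed */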
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

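/* whether a key is looked up as a local lkey or a remote rkey */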
enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
int rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
	     struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

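/* accessors for fields of the ib_qp and qp attributes embedded in rxe_qp */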
static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

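/*
 * Connected QPs (RC/UC) honor the negotiated path MTU; for all other
 * QP types the port's maximum MTU applies.
 */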
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return RXE_PORT_MAX_MTU;
}

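/*
 * A receive WQE is a fixed header followed by an inline array of SGEs,
 * so the queue element size scales with the max_sge chosen at create
 * time, e.g. rcv_wqe_size(4) == sizeof(struct rxe_recv_wqe) +
 * 4 * sizeof(struct ib_sge).
 */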
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

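/*
 * Advance the head of the responder's circular ring of RDMA read and
 * atomic resources, wrapping after max_dest_rd_atomic entries.
 */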
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
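/*
 * Attribute mask used when checking SRQ attributes at create time:
 * every bit except the SRQ limit, which is only armed later via
 * modify_srq.
 */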
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata);

void rxe_release(struct kref *kref);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

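/* per-QP-type mask describing what a work request opcode is allowed to do */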
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

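/*
 * Transmit one packet: drop it if the QP is not ready, deliver it
 * locally if it is marked for loopback, otherwise hand it to the
 * network layer. On the final packet of a non-RC message the WQE is
 * marked done and the completion task is scheduled, since no ACK
 * will arrive to complete it.
 */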
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		err = rxe_loopback(skb);
	} else {
		err = rxe_send(rxe, pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */