/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av,
			 struct rdma_ah_attr *attr,
			 struct ib_gid_attr *sgid_attr,
			 union ib_gid *sgid);

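/*
 * Look up the address vector a packet should be routed with; for
 * connected QPs this is expected to be the QP's primary AV, for
 * datagram QPs the AV carried in the WQE (see rxe_av.c).
 */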
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_ucontext *context,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
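/*
 * Metadata for a queue buffer that user space may mmap(); entries are
 * queued on the context's pending_mmaps list until rxe_mmap() maps them.
 */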
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
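/* Direction of a payload copy in rxe_mem_copy()/copy_data(). */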
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

int rxe_mem_init_dma(struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

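/*
 * How lookup_mem() interprets @key: lookup_local matches a region's
 * lkey, lookup_remote its rkey.
 */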
enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

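/* Accessors for values kept in the core ib_qp and the cached attributes. */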
static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

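/*
 * Connected QPs (RC/UC) are bound by the negotiated path MTU;
 * datagram QPs are limited only by the port maximum.
 */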
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return RXE_PORT_MAX_MTU;
}

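/* Bytes needed for a receive WQE carrying up to max_sge SG entries. */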
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

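/*
 * Advance the responder's RDMA read/atomic resource ring head,
 * wrapping once max_dest_rd_atomic entries have been used.
 */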
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
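/*
 * Attribute mask presumably used when validating an SRQ at create
 * time: every attribute except the limit.
 */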
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd);

void rxe_release(struct kref *kref);

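/*
 * Work functions for the per-QP completer, requester and responder
 * tasks; @arg is the struct rxe_qp being driven.
 */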
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

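/*
 * Validity mask for a work-request opcode on this QP type, taken from
 * the rxe_wr_opcode_info[] table.
 */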
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

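/*
 * Transmit one packet: drop it if the QP is not ready, loop it back
 * locally if it is addressed to this device, otherwise hand it to
 * rxe_send().  For non-RC QPs the WQE is completed once its last
 * fragment has gone out.
 */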
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		/* local destination: feed the skb straight back in */
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		/* last fragment of an unacknowledged WQE: complete it now */
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */