/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					   struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};
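/*
 * Illustrative sketch only, not kernel-tree code: as the enum names
 * suggest, to_mem_obj copies a kernel buffer into the memory object
 * (e.g. a received payload landing in an MR) while from_mem_obj copies
 * out of it (e.g. building an outgoing payload). The mr, iova, payload,
 * paylen, and crc variables are hypothetical placeholders; see the
 * rxe_mem_copy() declaration below for the real signature:
 *
 *	err = rxe_mem_copy(mr, iova, payload, paylen, to_mem_obj, &crc);
 *	err = rxe_mem_copy(mr, iova, payload, paylen, from_mem_obj, &crc);
 */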
void rxe_mem_init_dma(struct rxe_pd *pd,
		      int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

/* RC/UC QPs honor the negotiated path MTU; other QP types use the maximum */
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}

static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

/* advance the responder's read/atomic resource index, wrapping the ring
 * at max_dest_rd_atomic
 */
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
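/*
 * IB_SRQ_INIT_MASK selects every SRQ attribute bit except IB_SRQ_LIMIT,
 * so the limit (which starts out at 0, i.e. disarmed) is skipped when a
 * newly created SRQ is validated. A hedged sketch of the creation-time
 * check, assuming rxe and init were set up by the caller:
 *
 *	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
 *	if (err)
 *		goto err_out;
 */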
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				  struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* drop packets while the QP cannot send (requests) or
	 * respond (acks/read responses)
	 */
	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	/* loopback packets are delivered directly, bypassing the stack */
	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	/* for non-RC QPs the last packet of a request retires the WQE
	 * and kicks the completion task
	 */
	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */
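/*
 * Example caller of rxe_xmit_packet() above (a sketch, not verbatim from
 * the requester path): pkt and skb are assumed to have been built with
 * rxe_init_packet() and rxe_prepare(). On failure the caller typically
 * records that a fresh skb is needed and reschedules its own task:
 *
 *	err = rxe_xmit_packet(qp, &pkt, skb);
 *	if (err) {
 *		qp->need_req_skb = 1;
 *		rxe_run_task(&qp->req.task, 1);
 *	}
 */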