/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */

int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);

int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
		     struct rxe_av *av, struct ib_ah_attr *attr);

int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
		   struct ib_ah_attr *attr);

int rxe_av_fill_ip_info(struct rxe_dev *rxe,
			struct rxe_av *av,
			struct ib_ah_attr *attr,
			struct ib_gid_attr *sgid_attr,
			union ib_gid *sgid);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector, struct ib_udata *udata);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_ucontext *context,
		     struct ib_udata *udata);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};
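/*
 * Usage sketch (illustrative, not part of this header): to_mem_obj
 * copies bytes into the memory region's backing store, from_mem_obj
 * copies them back out.  A caller landing an incoming payload in a
 * registered MR would do roughly:
 *
 *	err = rxe_mem_copy(mem, iova, payload_addr, paylen,
 *			   to_mem_obj, &crc);
 *
 * payload_addr and paylen are hypothetical locals naming the packet
 * payload; crc is updated over the copied bytes when a non-NULL
 * pointer is passed.
 */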
int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
int rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
	     struct sk_buff *skb);
__be64 rxe_port_guid(struct rxe_dev *rxe);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
__be64 rxe_node_guid(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return RXE_PORT_MAX_MTU;
}

static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}
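/*
 * Example (sketch): a receive queue created with max_sge = 4 reserves
 * rcv_wqe_size(4) bytes per queue element, i.e.
 *
 *	sizeof(struct rxe_recv_wqe) + 4 * sizeof(struct ib_sge)
 *
 * so each WQE header and its scatter/gather list occupy one
 * contiguous slot in the queue buffer.
 */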
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

/* Advance the responder's circular ring of read/atomic resources,
 * wrapping back to slot 0 at the QP's max_dest_rd_atomic limit.
 */
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(unsigned long data);
void rnr_nak_timer(unsigned long data);

void dump_qp(struct rxe_qp *qp);

/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata);

extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;

void rxe_release(struct kref *kref);

void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

/* Transmit a prepared packet, either looping it back to this device or
 * handing it to the network stack.  A packet sent while the QP is not
 * in the ready state is dropped and reported as success (err = 0).
 */
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		err = rxe_loopback(skb);
	} else {
		err = rxe_send(rxe, pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		return err;
	}

	/* Non-RC QPs get no acks, so the last packet of a request
	 * completes the WQE immediately.
	 */
	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */