/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, the WQE construction code, which
 * repeatedly advances the pointer while writing data, only has to check
 * whether it has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

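	/* fragment_end points at the last contiguous stride of this fragment;
	 * the edge is the first address past it.
	 */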
	return fragment_end + MLX5_SEND_WQE_BB;
}

/* handle_post_send_edge - Check whether the SQ edge has been reached. If so,
 * advance to the next nearby edge and get a new address translation for the
 * current WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	if (likely(*seg != *cur_edge))
		return;

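	/* sq->cur_post counts 64B basic blocks (BBs) while wqe_sz is in 16B
	 * units, so wqe_sz >> 2 converts the WQE size to BBs; the mask wraps
	 * around the power-of-two sized SQ.
	 */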
	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* mlx5r_memcpy_send_wqe - Copy data from src to the WQE and update the
 * relevant WQ's pointers. On return, @seg is 16B aligned regardless of the
 * copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
					 void **seg, u32 *wqe_sz,
					 const void *src, size_t n)
{
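	/* Copy in chunks that never cross the current edge; whenever a chunk
	 * ends exactly on the edge, handle_post_send_edge() advances *seg and
	 * *cur_edge to the next fragment before the next iteration.
	 */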
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
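		/* The last chunk's stride is rounded up to 16B so that *seg
		 * stays 16B aligned; *wqe_sz is accounted in 16B units.
		 */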
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}

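/*
 * Low-level WQE posting helpers implemented in wr.c. Roughly, mlx5r_wq_overflow()
 * checks for SQ overflow, mlx5r_begin_wqe() reserves the next WQE slot, the
 * caller fills in the segments (crossing fragment edges with the helpers
 * above), mlx5r_finish_wqe() completes the WQE bookkeeping, and mlx5r_ring_db()
 * rings the doorbell for the WQEs posted so far.
 */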
int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		    struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
		    int *size, void **cur_edge, int nreq, __be32 general_id,
		    bool send_signaled, bool solicited);
void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
		      void *seg, u8 size, void *cur_edge, unsigned int idx,
		      u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
		   struct mlx5_wqe_ctrl_seg *ctrl);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
#endif /* _MLX5_IB_WR_H */