1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx5/driver.h>
34 #include "wq.h"
35 #include "mlx5_core.h"
36 
37 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
38 {
39 	return (u32)wq->fbc.sz_m1 + 1;
40 }
41 
42 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
43 {
44 	return wq->fbc.sz_m1 + 1;
45 }
46 
47 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
48 {
49 	return (u32)wq->fbc.sz_m1 + 1;
50 }
51 
52 static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
53 {
54 	return ((u32)1 << log_sz) << log_stride;
55 }
56 
/* Create a cyclic work queue.
 *
 * Reads log_wq_stride/log_wq_sz from @wqc, allocates the doorbell
 * record and the fragmented WQ buffer on the NUMA nodes requested in
 * @param, and initializes the frag-buf control structure (wq->fbc)
 * used for WQE address calculation.
 *
 * Returns 0 on success or a negative errno from the failing allocator;
 * on failure the doorbell record is freed before returning, so the
 * caller owns nothing.
 */
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
	u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	int err;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db  = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
	/* Cache the WQE count so the fast path avoids recomputing it */
	wq->sz = mlx5_wq_cyc_get_size(wq);

	/* Remember the device so mlx5_wq_destroy() can free the resources */
	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
93 
94 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
95 		      void *qpc, struct mlx5_wq_qp *wq,
96 		      struct mlx5_wq_ctrl *wq_ctrl)
97 {
98 	u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
99 	u8 log_rq_sz     = MLX5_GET(qpc, qpc, log_rq_size);
100 	u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
101 	u8 log_sq_sz     = MLX5_GET(qpc, qpc, log_sq_size);
102 
103 	u32 rq_byte_size;
104 	int err;
105 
106 
107 
108 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
109 	if (err) {
110 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
111 		return err;
112 	}
113 
114 	err = mlx5_frag_buf_alloc_node(mdev,
115 				       wq_get_byte_sz(log_rq_sz, log_rq_stride) +
116 				       wq_get_byte_sz(log_sq_sz, log_sq_stride),
117 				       &wq_ctrl->buf, param->buf_numa_node);
118 	if (err) {
119 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
120 		goto err_db_free;
121 	}
122 
123 	mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
124 
125 	rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
126 
127 	if (rq_byte_size < PAGE_SIZE) {
128 		/* SQ starts within the same page of the RQ */
129 		u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
130 
131 		mlx5_init_fbc_offset(wq_ctrl->buf.frags,
132 				     log_sq_stride, log_sq_sz, sq_strides_offset,
133 				     &wq->sq.fbc);
134 	} else {
135 		u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
136 
137 		mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
138 			      log_sq_stride, log_sq_sz, &wq->sq.fbc);
139 	}
140 
141 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
142 	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
143 
144 	wq_ctrl->mdev = mdev;
145 
146 	return 0;
147 
148 err_db_free:
149 	mlx5_db_free(mdev, &wq_ctrl->db);
150 
151 	return err;
152 }
153 
/* Create a completion queue work queue.
 *
 * Derives the CQE stride from the cqe_sz field of @cqc (64B or 128B)
 * and the queue depth from log_cq_size, then allocates the doorbell
 * record and the fragmented CQ buffer on the NUMA nodes requested in
 * @param and initializes wq->fbc for CQE address calculation.
 *
 * Returns 0 on success or a negative errno; on failure the doorbell
 * record is freed before returning.
 */
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	/* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
	u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
	u8 log_wq_sz     = MLX5_GET(cqc, cqc, log_cq_size);
	int err;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db  = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf,
				       param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);

	/* Remember the device so mlx5_wq_destroy() can free the resources */
	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
191 
/* Create a linked-list work queue (e.g. for SRQs).
 *
 * Like mlx5_wq_cyc_create(), but after allocating the buffer every
 * WQE's next_wqe_index is chained to its successor so the queue forms
 * a singly linked free list, and wq->tail_next is pointed at the last
 * WQE's next_wqe_index so new tail links can be patched in.
 *
 * Returns 0 on success or a negative errno; on failure the doorbell
 * record is freed before returning.
 */
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
	u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	wq->db  = wq_ctrl->db.db;

	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);

	/* Link each WQE to the next one; indices are big-endian on the wire */
	for (i = 0; i < fbc->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	/* i == sz_m1 here: keep a pointer to the last WQE's next-index field */
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
236 
/* Free the buffer and doorbell record of any WQ created above.
 * Uses the mdev cached in @wq_ctrl at creation time; release order is
 * the reverse of allocation (buffer first, then doorbell).
 */
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
242 
243