xref: /openbmc/linux/drivers/infiniband/hw/mlx5/cmd.c (revision 82df5b73)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "cmd.h"

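/* Query the device's special contexts and return the dump/fill memory key. */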
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}

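/* Query the device's special contexts and return the null memory key. */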
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

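/*
 * Read the congestion control parameters of the requested congestion
 * protocol (cong_point) into the caller-provided output buffer.
 */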
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}

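/*
 * Allocate device memory (MEMIC) of @length bytes with the requested
 * @alignment. On success the region's address within the device BAR is
 * returned through @addr.
 */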
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* The mlx5 device interprets the alignment as 64 * 2^driver_value,
	 * so the requested alignment must be normalized before it is
	 * passed to firmware.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

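	/*
	 * Scan the driver's MEMIC page bitmap for a free range and ask
	 * firmware to allocate from it. On -EAGAIN the reserved pages are
	 * released and the search resumes from the next page.
	 */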
	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

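/*
 * Free a MEMIC region previously allocated by mlx5_cmd_alloc_memic() and,
 * on success, clear the corresponding pages in the driver's bitmap.
 */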
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
	if (err)
		return;

	spin_lock(&dm->lock);
	bitmap_clear(dm->memic_alloc_pages,
		     start_page_idx, num_pages);
	spin_unlock(&dm->lock);
}

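/*
 * Read the extended Ethernet counters group of the PPCNT register
 * for local port 1.
 */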
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
				    0, 0);
}

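/*
 * The destroy helpers below issue the matching DESTROY_* command for an
 * object that was created on behalf of the user context given by @uid.
 */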
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tir, in);
}

void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tis, in);
}

void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_rqt, in);
}

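/* Allocate a transport domain for @uid and return its number through @tdn. */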
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}

void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, dealloc_pd, in);
}

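/*
 * Attach the given QP to the multicast group identified by @mgid;
 * mlx5_cmd_detach_mcg() below performs the inverse operation.
 */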
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}

int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}

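/* Allocate an XRC domain for @uid and return its number through @xrcdn. */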
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}

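/*
 * Issue a MAD_IFC command: copy the request MAD from @inb into the command
 * mailbox, execute it on @port with the given @opmod, and copy the response
 * MAD back to @outb.
 */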
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);

	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}