xref: /openbmc/linux/drivers/vdpa/mlx5/core/resources.c (revision 3c8c1539)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd. */
3 
4 #include <linux/iova.h>
5 #include <linux/mlx5/driver.h>
6 #include "mlx5_vdpa.h"
7 
8 static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
9 {
10 	struct mlx5_core_dev *mdev = dev->mdev;
11 
12 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
13 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
14 	int err;
15 
16 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
17 	MLX5_SET(alloc_pd_in, in, uid, uid);
18 
19 	err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
20 	if (!err)
21 		*pdn = MLX5_GET(alloc_pd_out, out, pd);
22 
23 	return err;
24 }
25 
26 static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
27 {
28 	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
29 	struct mlx5_core_dev *mdev = dev->mdev;
30 
31 	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
32 	MLX5_SET(dealloc_pd_in, in, pd, pdn);
33 	MLX5_SET(dealloc_pd_in, in, uid, uid);
34 	return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
35 }
36 
37 static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
38 {
39 	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
40 	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
41 	struct mlx5_core_dev *mdev = dev->mdev;
42 	int err;
43 
44 	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
45 	err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
46 	if (!err)
47 		*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
48 	return err;
49 }
50 
51 static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
52 {
53 	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
54 	int inlen;
55 	void *in;
56 	int err;
57 
58 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
59 		return 0;
60 
61 	/* 0 means not supported */
62 	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
63 		return -EOPNOTSUPP;
64 
65 	inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
66 	in = kzalloc(inlen, GFP_KERNEL);
67 	if (!in)
68 		return -ENOMEM;
69 
70 	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
71 	MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
72 
73 	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
74 	kfree(in);
75 	if (!err)
76 		*uid = MLX5_GET(create_uctx_out, out, uid);
77 
78 	return err;
79 }
80 
81 static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
82 {
83 	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
84 	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
85 
86 	if (!uid)
87 		return;
88 
89 	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
90 	MLX5_SET(destroy_uctx_in, in, uid, uid);
91 
92 	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
93 }
94 
95 int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
96 {
97 	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
98 	int err;
99 
100 	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
101 	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
102 	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
103 	if (!err)
104 		*tisn = MLX5_GET(create_tis_out, out, tisn);
105 
106 	return err;
107 }
108 
109 void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
110 {
111 	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
112 
113 	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
114 	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
115 	MLX5_SET(destroy_tis_in, in, tisn, tisn);
116 	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
117 }
118 
119 int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
120 {
121 	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
122 	int err;
123 
124 	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
125 	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
126 	if (!err)
127 		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
128 
129 	return err;
130 }
131 
132 int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
133 {
134 	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
135 
136 	MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
137 	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
138 	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
139 	return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
140 }
141 
142 void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
143 {
144 	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
145 
146 	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
147 	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
148 	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
149 	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
150 }
151 
152 int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
153 {
154 	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
155 	int err;
156 
157 	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
158 	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
159 	if (!err)
160 		*tirn = MLX5_GET(create_tir_out, out, tirn);
161 
162 	return err;
163 }
164 
165 void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
166 {
167 	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
168 
169 	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
170 	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
171 	MLX5_SET(destroy_tir_in, in, tirn, tirn);
172 	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
173 }
174 
175 int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
176 {
177 	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
178 	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
179 	int err;
180 
181 	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
182 	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
183 
184 	err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
185 	if (!err)
186 		*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
187 
188 	return err;
189 }
190 
191 void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
192 {
193 	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
194 
195 	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
196 	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
197 	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
198 	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
199 }
200 
201 int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
202 			  int inlen)
203 {
204 	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
205 	u32 mkey_index;
206 	void *mkc;
207 	int err;
208 
209 	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
210 	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
211 
212 	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
213 	if (err)
214 		return err;
215 
216 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
217 	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
218 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
219 	mkey->size = MLX5_GET64(mkc, mkc, len);
220 	mkey->key |= mlx5_idx_to_mkey(mkey_index);
221 	mkey->pd = MLX5_GET(mkc, mkc, pd);
222 	return 0;
223 }
224 
225 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
226 {
227 	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
228 
229 	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
230 	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
231 	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
232 	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
233 }
234 
235 static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
236 {
237 	mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
238 	if (!mvdev->cvq.iotlb)
239 		return -ENOMEM;
240 
241 	vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
242 
243 	return 0;
244 }
245 
246 static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
247 {
248 	vhost_iotlb_free(mvdev->cvq.iotlb);
249 }
250 
/*
 * Allocate the per-device firmware resources used by the mlx5 vdpa driver:
 * UAR page, user context, PD, null mkey, a mapping of the doorbell (kick)
 * area, and the control VQ iotlb. On success res->valid is set; resources
 * are released in reverse order on any failure via the goto ladder below.
 * Returns 0 or a negative errno.
 */
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	/* Double allocation would leak the already-held resources */
	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	mutex_init(&mvdev->mr.mkey_mtx);
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	/* May be a no-op (uid stays 0) when the device allows umem uid 0 */
	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_key;

	/* Doorbell area lives at a device-reported offset into the BAR */
	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_key;
	}

	err = init_ctrl_vq(mvdev);
	if (err)
		goto err_ctrl;

	res->valid = true;

	return 0;

/* Unwind in reverse order of acquisition */
err_ctrl:
	iounmap(res->kick_addr);
err_key:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	mutex_destroy(&mvdev->mr.mkey_mtx);
	return err;
}
311 
/*
 * Release everything acquired by mlx5_vdpa_alloc_resources(), in reverse
 * order of allocation. Safe to call when nothing was allocated (res->valid
 * is false); clears res->valid so a subsequent alloc can succeed.
 */
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	cleanup_ctrl_vq(mvdev);
	iounmap(res->kick_addr);
	res->kick_addr = NULL;	/* guard against stale use of the mapping */
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);	/* no-op when uid is 0 */
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	mutex_destroy(&mvdev->mr.mkey_mtx);
	res->valid = false;
}
328