/* xref: /openbmc/linux/drivers/vdpa/mlx5/core/resources.c (revision aa0dc6a7) */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"

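/* Allocate a protection domain (PD), tagging the command with @uid so that
 * firmware associates the PD with this device's user context.
 */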
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);

	return err;
}

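/* Free a PD previously obtained from alloc_pd(). */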
static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}

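/* Query the device's special contexts and return the null mkey, which is
 * cached in the resources struct for later use by the driver.
 */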
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
	return err;
}

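/* Create a user context (UCTX) and return its uid. If the device reports the
 * umem_uid_0 capability, UMEM objects may be created with uid 0, so no UCTX is
 * needed and the uid is left at 0. A log_max_uctx of 0 means UCTX objects are
 * not supported at all.
 */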
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	int inlen;
	void *in;
	int err;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
		return 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		*uid = MLX5_GET(create_uctx_out, out, uid);

	return err;
}

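/* Destroy the UCTX created by create_uctx(). A uid of 0 means no UCTX was
 * created, so there is nothing to tear down.
 */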
static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};

	if (!uid)
		return;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}

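/* Create a transport interface send (TIS) object from a caller-prepared
 * mailbox, stamping it with this device's uid, and return its number in *tisn.
 */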
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
	if (!err)
		*tisn = MLX5_GET(create_tis_out, out, tisn);

	return err;
}

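/* Destroy a TIS previously created with mlx5_vdpa_create_tis(). */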
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}

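/* Create a receive queue table (RQT) from a caller-prepared mailbox of @inlen
 * bytes and return its number in *rqtn. Note that, unlike the TIS helper, the
 * uid is not set here.
 */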
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (!err)
		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	return err;
}

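/* Destroy an RQT previously created with mlx5_vdpa_create_rqt(). */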
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}

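/* Create a transport interface receive (TIR) object from a caller-prepared
 * mailbox and return its number in *tirn. As with the RQT helper, the uid is
 * not set here.
 */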
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int err;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
	if (!err)
		*tirn = MLX5_GET(create_tir_out, out, tirn);

	return err;
}

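/* Destroy a TIR previously created with mlx5_vdpa_create_tir(). */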
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}

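/* Allocate a transport domain owned by this device's uid and return its
 * number in *tdn.
 */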
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);

	return err;
}

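/* Release a transport domain obtained from mlx5_vdpa_alloc_transport_domain(). */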
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}

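/* Create a memory key (mkey) from a caller-prepared CREATE_MKEY mailbox and
 * fill in the software mkey descriptor (iova, size, key and pd) from the
 * mailbox contents and the returned mkey index.
 */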
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
			  int inlen)
{
	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
	u32 mkey_index;
	void *mkc;
	int err;

	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
	if (err)
		return err;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->key |= mlx5_idx_to_mkey(mkey_index);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	return 0;
}

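/* Destroy an mkey previously created with mlx5_vdpa_create_mkey(). */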
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}

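/* Set up the resources shared by all virtqueues of the device: the UAR page,
 * the user context (uid), the PD, the cached null mkey, and an ioremap of the
 * doorbell page at doorbell_bar_offset within the device BAR, which is used as
 * the virtqueue kick address.
 */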
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	mutex_init(&mvdev->mr.mkey_mtx);
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_key;

	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_key;
	}
	res->valid = true;

	return 0;

err_key:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	mutex_destroy(&mvdev->mr.mkey_mtx);
	return err;
}

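/* Tear down everything set up by mlx5_vdpa_alloc_resources(). Does nothing if
 * the resources were never successfully allocated (res->valid not set).
 */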
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	iounmap(res->kick_addr);
	res->kick_addr = NULL;
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	mutex_destroy(&mvdev->mr.mkey_mtx);
	res->valid = false;
}