xref: /openbmc/linux/drivers/vdpa/mlx5/net/mlx5_vnet.c (revision 1a86b377aa2147a7c866b03142e848c18e5f3cb8)
1*1a86b377SEli Cohen // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2*1a86b377SEli Cohen /* Copyright (c) 2020 Mellanox Technologies Ltd. */
3*1a86b377SEli Cohen 
4*1a86b377SEli Cohen #include <linux/vdpa.h>
5*1a86b377SEli Cohen #include <uapi/linux/virtio_ids.h>
6*1a86b377SEli Cohen #include <linux/virtio_config.h>
7*1a86b377SEli Cohen #include <linux/mlx5/qp.h>
8*1a86b377SEli Cohen #include <linux/mlx5/device.h>
9*1a86b377SEli Cohen #include <linux/mlx5/vport.h>
10*1a86b377SEli Cohen #include <linux/mlx5/fs.h>
12*1a86b377SEli Cohen #include "mlx5_vnet.h"
13*1a86b377SEli Cohen #include "mlx5_vdpa_ifc.h"
14*1a86b377SEli Cohen #include "mlx5_vdpa.h"
15*1a86b377SEli Cohen 
16*1a86b377SEli Cohen #define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
17*1a86b377SEli Cohen 
18*1a86b377SEli Cohen #define VALID_FEATURES_MASK                                                                        \
19*1a86b377SEli Cohen 	(BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) |                                   \
20*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) |   \
21*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) |                             \
22*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_GUEST_ECN) | BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \
23*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) |   \
24*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) |      \
25*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) |                                 \
26*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) |                      \
27*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) |  \
28*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) |           \
29*1a86b377SEli Cohen 	 BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) |                          \
30*1a86b377SEli Cohen 	 BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) |      \
31*1a86b377SEli Cohen 	 BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV))
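
/* Note: BIT() expands to 1UL << n, so the transport features above bit 31 in
 * the mask above (VIRTIO_F_VERSION_1 and later) rely on unsigned long being
 * 64 bits wide.
 */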
32*1a86b377SEli Cohen 
33*1a86b377SEli Cohen #define VALID_STATUS_MASK                                                                          \
34*1a86b377SEli Cohen 	(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK |        \
35*1a86b377SEli Cohen 	 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
36*1a86b377SEli Cohen 
37*1a86b377SEli Cohen struct mlx5_vdpa_net_resources {
38*1a86b377SEli Cohen 	u32 tisn;
39*1a86b377SEli Cohen 	u32 tdn;
40*1a86b377SEli Cohen 	u32 tirn;
41*1a86b377SEli Cohen 	u32 rqtn;
42*1a86b377SEli Cohen 	bool valid;
43*1a86b377SEli Cohen };
44*1a86b377SEli Cohen 
45*1a86b377SEli Cohen struct mlx5_vdpa_cq_buf {
46*1a86b377SEli Cohen 	struct mlx5_frag_buf_ctrl fbc;
47*1a86b377SEli Cohen 	struct mlx5_frag_buf frag_buf;
48*1a86b377SEli Cohen 	int cqe_size;
49*1a86b377SEli Cohen 	int nent;
50*1a86b377SEli Cohen };
51*1a86b377SEli Cohen 
52*1a86b377SEli Cohen struct mlx5_vdpa_cq {
53*1a86b377SEli Cohen 	struct mlx5_core_cq mcq;
54*1a86b377SEli Cohen 	struct mlx5_vdpa_cq_buf buf;
55*1a86b377SEli Cohen 	struct mlx5_db db;
56*1a86b377SEli Cohen 	int cqe;
57*1a86b377SEli Cohen };
58*1a86b377SEli Cohen 
59*1a86b377SEli Cohen struct mlx5_vdpa_umem {
60*1a86b377SEli Cohen 	struct mlx5_frag_buf_ctrl fbc;
61*1a86b377SEli Cohen 	struct mlx5_frag_buf frag_buf;
62*1a86b377SEli Cohen 	int size;
63*1a86b377SEli Cohen 	u32 id;
64*1a86b377SEli Cohen };
65*1a86b377SEli Cohen 
66*1a86b377SEli Cohen struct mlx5_vdpa_qp {
67*1a86b377SEli Cohen 	struct mlx5_core_qp mqp;
68*1a86b377SEli Cohen 	struct mlx5_frag_buf frag_buf;
69*1a86b377SEli Cohen 	struct mlx5_db db;
70*1a86b377SEli Cohen 	u16 head;
71*1a86b377SEli Cohen 	bool fw;
72*1a86b377SEli Cohen };
73*1a86b377SEli Cohen 
74*1a86b377SEli Cohen struct mlx5_vq_restore_info {
75*1a86b377SEli Cohen 	u32 num_ent;
76*1a86b377SEli Cohen 	u64 desc_addr;
77*1a86b377SEli Cohen 	u64 device_addr;
78*1a86b377SEli Cohen 	u64 driver_addr;
79*1a86b377SEli Cohen 	u16 avail_index;
80*1a86b377SEli Cohen 	bool ready;
81*1a86b377SEli Cohen 	struct vdpa_callback cb;
82*1a86b377SEli Cohen 	bool restore;
83*1a86b377SEli Cohen };
84*1a86b377SEli Cohen 
85*1a86b377SEli Cohen struct mlx5_vdpa_virtqueue {
86*1a86b377SEli Cohen 	bool ready;
87*1a86b377SEli Cohen 	u64 desc_addr;
88*1a86b377SEli Cohen 	u64 device_addr;
89*1a86b377SEli Cohen 	u64 driver_addr;
90*1a86b377SEli Cohen 	u32 num_ent;
91*1a86b377SEli Cohen 	struct vdpa_callback event_cb;
92*1a86b377SEli Cohen 
93*1a86b377SEli Cohen 	/* Resources for implementing the notification channel from the device
94*1a86b377SEli Cohen 	 * to the driver. fwqp is the firmware end of an RC connection; the
95*1a86b377SEli Cohen 	 * other end is vqqp, used by the driver. cq is where completions are
96*1a86b377SEli Cohen 	 * reported.
97*1a86b377SEli Cohen 	 */
98*1a86b377SEli Cohen 	struct mlx5_vdpa_cq cq;
99*1a86b377SEli Cohen 	struct mlx5_vdpa_qp fwqp;
100*1a86b377SEli Cohen 	struct mlx5_vdpa_qp vqqp;
101*1a86b377SEli Cohen 
102*1a86b377SEli Cohen 	/* umem resources are required for the virtqueue operation. Their use
103*1a86b377SEli Cohen 	 * is internal and they must be provided by the driver.
104*1a86b377SEli Cohen 	 */
105*1a86b377SEli Cohen 	struct mlx5_vdpa_umem umem1;
106*1a86b377SEli Cohen 	struct mlx5_vdpa_umem umem2;
107*1a86b377SEli Cohen 	struct mlx5_vdpa_umem umem3;
108*1a86b377SEli Cohen 
109*1a86b377SEli Cohen 	bool initialized;
110*1a86b377SEli Cohen 	int index;
111*1a86b377SEli Cohen 	u32 virtq_id;
112*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev;
113*1a86b377SEli Cohen 	u16 avail_idx;
114*1a86b377SEli Cohen 	int fw_state;
115*1a86b377SEli Cohen 
116*1a86b377SEli Cohen 	/* keep last in the struct */
117*1a86b377SEli Cohen 	struct mlx5_vq_restore_info ri;
118*1a86b377SEli Cohen };
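
/* Notification path sketch (as implemented below): the firmware posts
 * virtqueue events on fwqp, they arrive on vqqp over the RC connection and
 * generate CQEs on cq; mlx5_vdpa_cq_comp() polls the CQ, reposts the receive
 * entries and forwards the event to the vdpa core through event_cb.callback().
 */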
119*1a86b377SEli Cohen 
120*1a86b377SEli Cohen /* We will remove this limitation once mlx5_vdpa_alloc_resources()
121*1a86b377SEli Cohen  * provides for driver space allocation
122*1a86b377SEli Cohen  */
123*1a86b377SEli Cohen #define MLX5_MAX_SUPPORTED_VQS 16
124*1a86b377SEli Cohen 
125*1a86b377SEli Cohen struct mlx5_vdpa_net {
126*1a86b377SEli Cohen 	struct mlx5_vdpa_dev mvdev;
127*1a86b377SEli Cohen 	struct mlx5_vdpa_net_resources res;
128*1a86b377SEli Cohen 	struct virtio_net_config config;
129*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
130*1a86b377SEli Cohen 
131*1a86b377SEli Cohen 	/* Serialize vq resources creation and destruction. This is required
132*1a86b377SEli Cohen 	 * since the memory map might change and we need to destroy and create
133*1a86b377SEli Cohen 	 * resources while the driver is operational.
134*1a86b377SEli Cohen 	 */
135*1a86b377SEli Cohen 	struct mutex reslock;
136*1a86b377SEli Cohen 	struct mlx5_flow_table *rxft;
137*1a86b377SEli Cohen 	struct mlx5_fc *rx_counter;
138*1a86b377SEli Cohen 	struct mlx5_flow_handle *rx_rule;
139*1a86b377SEli Cohen 	bool setup;
140*1a86b377SEli Cohen };
141*1a86b377SEli Cohen 
142*1a86b377SEli Cohen static void free_resources(struct mlx5_vdpa_net *ndev);
143*1a86b377SEli Cohen static void init_mvqs(struct mlx5_vdpa_net *ndev);
144*1a86b377SEli Cohen static int setup_driver(struct mlx5_vdpa_net *ndev);
145*1a86b377SEli Cohen static void teardown_driver(struct mlx5_vdpa_net *ndev);
146*1a86b377SEli Cohen 
147*1a86b377SEli Cohen static bool mlx5_vdpa_debug;
148*1a86b377SEli Cohen 
149*1a86b377SEli Cohen #define MLX5_LOG_VIO_FLAG(_feature)                                                                \
150*1a86b377SEli Cohen 	do {                                                                                       \
151*1a86b377SEli Cohen 		if (features & BIT(_feature))                                                      \
152*1a86b377SEli Cohen 			mlx5_vdpa_info(mvdev, "%s\n", #_feature);                                  \
153*1a86b377SEli Cohen 	} while (0)
154*1a86b377SEli Cohen 
155*1a86b377SEli Cohen #define MLX5_LOG_VIO_STAT(_status)                                                                 \
156*1a86b377SEli Cohen 	do {                                                                                       \
157*1a86b377SEli Cohen 		if (status & (_status))                                                            \
158*1a86b377SEli Cohen 			mlx5_vdpa_info(mvdev, "%s\n", #_status);                                   \
159*1a86b377SEli Cohen 	} while (0)
160*1a86b377SEli Cohen 
161*1a86b377SEli Cohen static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
162*1a86b377SEli Cohen {
163*1a86b377SEli Cohen 	if (status & ~VALID_STATUS_MASK)
164*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
165*1a86b377SEli Cohen 			       status & ~VALID_STATUS_MASK);
166*1a86b377SEli Cohen 
167*1a86b377SEli Cohen 	if (!mlx5_vdpa_debug)
168*1a86b377SEli Cohen 		return;
169*1a86b377SEli Cohen 
170*1a86b377SEli Cohen 	mlx5_vdpa_info(mvdev, "driver status %s\n", set ? "set" : "get");
171*1a86b377SEli Cohen 	if (set && !status) {
172*1a86b377SEli Cohen 		mlx5_vdpa_info(mvdev, "driver resets the device\n");
173*1a86b377SEli Cohen 		return;
174*1a86b377SEli Cohen 	}
175*1a86b377SEli Cohen 
176*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
177*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
178*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
179*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
180*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
181*1a86b377SEli Cohen 	MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
182*1a86b377SEli Cohen }
183*1a86b377SEli Cohen 
184*1a86b377SEli Cohen static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
185*1a86b377SEli Cohen {
186*1a86b377SEli Cohen 	if (features & ~VALID_FEATURES_MASK)
187*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
188*1a86b377SEli Cohen 			       features & ~VALID_FEATURES_MASK);
189*1a86b377SEli Cohen 
190*1a86b377SEli Cohen 	if (!mlx5_vdpa_debug)
191*1a86b377SEli Cohen 		return;
192*1a86b377SEli Cohen 
193*1a86b377SEli Cohen 	mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
194*1a86b377SEli Cohen 	if (!features)
195*1a86b377SEli Cohen 		mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
196*1a86b377SEli Cohen 
197*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
198*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
199*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
200*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
201*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
202*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
203*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
204*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
205*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
206*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
207*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
208*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
209*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
210*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
211*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
212*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
213*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
214*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
215*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
216*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
217*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
218*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
219*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
220*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
221*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
222*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
223*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
224*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
225*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
226*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
227*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
228*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
229*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
230*1a86b377SEli Cohen 	MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
231*1a86b377SEli Cohen }
232*1a86b377SEli Cohen 
233*1a86b377SEli Cohen static int create_tis(struct mlx5_vdpa_net *ndev)
234*1a86b377SEli Cohen {
235*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
236*1a86b377SEli Cohen 	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
237*1a86b377SEli Cohen 	void *tisc;
238*1a86b377SEli Cohen 	int err;
239*1a86b377SEli Cohen 
240*1a86b377SEli Cohen 	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
241*1a86b377SEli Cohen 	MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
242*1a86b377SEli Cohen 	err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
243*1a86b377SEli Cohen 	if (err)
244*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
245*1a86b377SEli Cohen 
246*1a86b377SEli Cohen 	return err;
247*1a86b377SEli Cohen }
248*1a86b377SEli Cohen 
249*1a86b377SEli Cohen static void destroy_tis(struct mlx5_vdpa_net *ndev)
250*1a86b377SEli Cohen {
251*1a86b377SEli Cohen 	mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
252*1a86b377SEli Cohen }
253*1a86b377SEli Cohen 
254*1a86b377SEli Cohen #define MLX5_VDPA_CQE_SIZE 64
255*1a86b377SEli Cohen #define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
256*1a86b377SEli Cohen 
257*1a86b377SEli Cohen static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
258*1a86b377SEli Cohen {
259*1a86b377SEli Cohen 	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
260*1a86b377SEli Cohen 	u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
261*1a86b377SEli Cohen 	u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
262*1a86b377SEli Cohen 	int err;
263*1a86b377SEli Cohen 
264*1a86b377SEli Cohen 	err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
265*1a86b377SEli Cohen 				       ndev->mvdev.mdev->priv.numa_node);
266*1a86b377SEli Cohen 	if (err)
267*1a86b377SEli Cohen 		return err;
268*1a86b377SEli Cohen 
269*1a86b377SEli Cohen 	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
270*1a86b377SEli Cohen 
271*1a86b377SEli Cohen 	buf->cqe_size = MLX5_VDPA_CQE_SIZE;
272*1a86b377SEli Cohen 	buf->nent = nent;
273*1a86b377SEli Cohen 
274*1a86b377SEli Cohen 	return 0;
275*1a86b377SEli Cohen }
276*1a86b377SEli Cohen 
277*1a86b377SEli Cohen static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
278*1a86b377SEli Cohen {
279*1a86b377SEli Cohen 	struct mlx5_frag_buf *frag_buf = &umem->frag_buf;
280*1a86b377SEli Cohen 
281*1a86b377SEli Cohen 	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
282*1a86b377SEli Cohen 					ndev->mvdev.mdev->priv.numa_node);
283*1a86b377SEli Cohen }
284*1a86b377SEli Cohen 
285*1a86b377SEli Cohen static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
286*1a86b377SEli Cohen {
287*1a86b377SEli Cohen 	mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
288*1a86b377SEli Cohen }
289*1a86b377SEli Cohen 
290*1a86b377SEli Cohen static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
291*1a86b377SEli Cohen {
292*1a86b377SEli Cohen 	return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
293*1a86b377SEli Cohen }
294*1a86b377SEli Cohen 
295*1a86b377SEli Cohen static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
296*1a86b377SEli Cohen {
297*1a86b377SEli Cohen 	struct mlx5_cqe64 *cqe64;
298*1a86b377SEli Cohen 	void *cqe;
299*1a86b377SEli Cohen 	int i;
300*1a86b377SEli Cohen 
301*1a86b377SEli Cohen 	for (i = 0; i < buf->nent; i++) {
302*1a86b377SEli Cohen 		cqe = get_cqe(vcq, i);
303*1a86b377SEli Cohen 		cqe64 = cqe;
304*1a86b377SEli Cohen 		cqe64->op_own = MLX5_CQE_INVALID << 4;
305*1a86b377SEli Cohen 	}
306*1a86b377SEli Cohen }
307*1a86b377SEli Cohen 
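/* A CQE is owned by software when its opcode is valid and its ownership bit
 * matches the parity of the consumer index with respect to the CQ size
 * (n & cq->cqe), following the usual mlx5 CQ ownership convention.
 */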
308*1a86b377SEli Cohen static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
309*1a86b377SEli Cohen {
310*1a86b377SEli Cohen 	struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
311*1a86b377SEli Cohen 
312*1a86b377SEli Cohen 	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
313*1a86b377SEli Cohen 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
314*1a86b377SEli Cohen 		return cqe64;
315*1a86b377SEli Cohen 
316*1a86b377SEli Cohen 	return NULL;
317*1a86b377SEli Cohen }
318*1a86b377SEli Cohen 
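/* Advance the receive head by n entries and publish it through the QP's
 * doorbell record so the consumed receive WQEs can be reused.
 */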
319*1a86b377SEli Cohen static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
320*1a86b377SEli Cohen {
321*1a86b377SEli Cohen 	vqp->head += n;
322*1a86b377SEli Cohen 	vqp->db.db[0] = cpu_to_be32(vqp->head);
323*1a86b377SEli Cohen }
324*1a86b377SEli Cohen 
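/* Fill the create_qp command. For the firmware-used QP only a minimal context
 * is given (zero length RQ, no SQ); for the driver QP a receive-only RC QP is
 * described, backed by the fragmented buffer and doorbell allocated in
 * qp_create().
 */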
325*1a86b377SEli Cohen static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
326*1a86b377SEli Cohen 		       struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
327*1a86b377SEli Cohen {
328*1a86b377SEli Cohen 	struct mlx5_vdpa_qp *vqp;
329*1a86b377SEli Cohen 	__be64 *pas;
330*1a86b377SEli Cohen 	void *qpc;
331*1a86b377SEli Cohen 
332*1a86b377SEli Cohen 	vqp = fw ? &mvq->fwqp : &mvq->vqqp;
333*1a86b377SEli Cohen 	MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
334*1a86b377SEli Cohen 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
335*1a86b377SEli Cohen 	if (vqp->fw) {
336*1a86b377SEli Cohen 		/* The firmware QP is allocated by the driver on the firmware's
337*1a86b377SEli Cohen 		 * behalf, so we can skip part of the params as they will be chosen by the firmware
338*1a86b377SEli Cohen 		 */
340*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
341*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, no_sq, 1);
342*1a86b377SEli Cohen 		return;
343*1a86b377SEli Cohen 	}
344*1a86b377SEli Cohen 
345*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
346*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
347*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
348*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
349*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
350*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
351*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, no_sq, 1);
352*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
353*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
354*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
355*1a86b377SEli Cohen 	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
356*1a86b377SEli Cohen 	mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
357*1a86b377SEli Cohen }
358*1a86b377SEli Cohen 
359*1a86b377SEli Cohen static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
360*1a86b377SEli Cohen {
361*1a86b377SEli Cohen 	return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
362*1a86b377SEli Cohen 					num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
363*1a86b377SEli Cohen 					ndev->mvdev.mdev->priv.numa_node);
364*1a86b377SEli Cohen }
365*1a86b377SEli Cohen 
366*1a86b377SEli Cohen static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
367*1a86b377SEli Cohen {
368*1a86b377SEli Cohen 	mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
369*1a86b377SEli Cohen }
370*1a86b377SEli Cohen 
371*1a86b377SEli Cohen static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
372*1a86b377SEli Cohen 		     struct mlx5_vdpa_qp *vqp)
373*1a86b377SEli Cohen {
374*1a86b377SEli Cohen 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
375*1a86b377SEli Cohen 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
376*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
377*1a86b377SEli Cohen 	void *qpc;
378*1a86b377SEli Cohen 	void *in;
379*1a86b377SEli Cohen 	int err;
380*1a86b377SEli Cohen 
381*1a86b377SEli Cohen 	if (!vqp->fw) {
382*1a86b377SEli Cohen 		vqp = &mvq->vqqp;
383*1a86b377SEli Cohen 		err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
384*1a86b377SEli Cohen 		if (err)
385*1a86b377SEli Cohen 			return err;
386*1a86b377SEli Cohen 
387*1a86b377SEli Cohen 		err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
388*1a86b377SEli Cohen 		if (err)
389*1a86b377SEli Cohen 			goto err_db;
390*1a86b377SEli Cohen 		inlen += vqp->frag_buf.npages * sizeof(__be64);
391*1a86b377SEli Cohen 	}
392*1a86b377SEli Cohen 
393*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
394*1a86b377SEli Cohen 	if (!in) {
395*1a86b377SEli Cohen 		err = -ENOMEM;
396*1a86b377SEli Cohen 		goto err_kzalloc;
397*1a86b377SEli Cohen 	}
398*1a86b377SEli Cohen 
399*1a86b377SEli Cohen 	qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
400*1a86b377SEli Cohen 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
401*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
402*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
403*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
404*1a86b377SEli Cohen 	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
405*1a86b377SEli Cohen 	if (!vqp->fw)
406*1a86b377SEli Cohen 		MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
407*1a86b377SEli Cohen 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
408*1a86b377SEli Cohen 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
409*1a86b377SEli Cohen 	kfree(in);
410*1a86b377SEli Cohen 	if (err)
411*1a86b377SEli Cohen 		goto err_kzalloc;
412*1a86b377SEli Cohen 
413*1a86b377SEli Cohen 	vqp->mqp.uid = ndev->mvdev.res.uid;
414*1a86b377SEli Cohen 	vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);
415*1a86b377SEli Cohen 
416*1a86b377SEli Cohen 	if (!vqp->fw)
417*1a86b377SEli Cohen 		rx_post(vqp, mvq->num_ent);
418*1a86b377SEli Cohen 
419*1a86b377SEli Cohen 	return 0;
420*1a86b377SEli Cohen 
421*1a86b377SEli Cohen err_kzalloc:
422*1a86b377SEli Cohen 	if (!vqp->fw)
423*1a86b377SEli Cohen 		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
424*1a86b377SEli Cohen err_db:
425*1a86b377SEli Cohen 	if (!vqp->fw)
426*1a86b377SEli Cohen 		rq_buf_free(ndev, vqp);
427*1a86b377SEli Cohen 
428*1a86b377SEli Cohen 	return err;
429*1a86b377SEli Cohen }
430*1a86b377SEli Cohen 
431*1a86b377SEli Cohen static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
432*1a86b377SEli Cohen {
433*1a86b377SEli Cohen 	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
434*1a86b377SEli Cohen 
435*1a86b377SEli Cohen 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
436*1a86b377SEli Cohen 	MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
437*1a86b377SEli Cohen 	MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
438*1a86b377SEli Cohen 	if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
439*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
440*1a86b377SEli Cohen 	if (!vqp->fw) {
441*1a86b377SEli Cohen 		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
442*1a86b377SEli Cohen 		rq_buf_free(ndev, vqp);
443*1a86b377SEli Cohen 	}
444*1a86b377SEli Cohen }
445*1a86b377SEli Cohen 
446*1a86b377SEli Cohen static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
447*1a86b377SEli Cohen {
448*1a86b377SEli Cohen 	return get_sw_cqe(cq, cq->mcq.cons_index);
449*1a86b377SEli Cohen }
450*1a86b377SEli Cohen 
451*1a86b377SEli Cohen static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
452*1a86b377SEli Cohen {
453*1a86b377SEli Cohen 	struct mlx5_cqe64 *cqe64;
454*1a86b377SEli Cohen 
455*1a86b377SEli Cohen 	cqe64 = next_cqe_sw(vcq);
456*1a86b377SEli Cohen 	if (!cqe64)
457*1a86b377SEli Cohen 		return -EAGAIN;
458*1a86b377SEli Cohen 
459*1a86b377SEli Cohen 	vcq->mcq.cons_index++;
460*1a86b377SEli Cohen 	return 0;
461*1a86b377SEli Cohen }
462*1a86b377SEli Cohen 
463*1a86b377SEli Cohen static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
464*1a86b377SEli Cohen {
465*1a86b377SEli Cohen 	mlx5_cq_set_ci(&mvq->cq.mcq);
466*1a86b377SEli Cohen 	rx_post(&mvq->vqqp, num);
467*1a86b377SEli Cohen 	if (mvq->event_cb.callback)
468*1a86b377SEli Cohen 		mvq->event_cb.callback(mvq->event_cb.private);
469*1a86b377SEli Cohen }
470*1a86b377SEli Cohen 
471*1a86b377SEli Cohen static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
472*1a86b377SEli Cohen {
473*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
474*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = mvq->ndev;
475*1a86b377SEli Cohen 	void __iomem *uar_page = ndev->mvdev.res.uar->map;
476*1a86b377SEli Cohen 	int num = 0;
477*1a86b377SEli Cohen 
478*1a86b377SEli Cohen 	while (!mlx5_vdpa_poll_one(&mvq->cq)) {
479*1a86b377SEli Cohen 		num++;
480*1a86b377SEli Cohen 		if (num > mvq->num_ent / 2) {
481*1a86b377SEli Cohen 			/* If completions keep coming while we poll, we want to
482*1a86b377SEli Cohen 			 * let the hardware know that we consumed them by
483*1a86b377SEli Cohen 			 * updating the doorbell record.  We also let vdpa core
484*1a86b377SEli Cohen 			 * know about this so it passes it on to the virtio driver
485*1a86b377SEli Cohen 			 * in the guest.
486*1a86b377SEli Cohen 			 */
487*1a86b377SEli Cohen 			mlx5_vdpa_handle_completions(mvq, num);
488*1a86b377SEli Cohen 			num = 0;
489*1a86b377SEli Cohen 		}
490*1a86b377SEli Cohen 	}
491*1a86b377SEli Cohen 
492*1a86b377SEli Cohen 	if (num)
493*1a86b377SEli Cohen 		mlx5_vdpa_handle_completions(mvq, num);
494*1a86b377SEli Cohen 
495*1a86b377SEli Cohen 	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
496*1a86b377SEli Cohen }
497*1a86b377SEli Cohen 
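/* Create the completion queue for a virtqueue. The two dwords of the doorbell
 * record are used as in other mlx5 drivers: db[0] is the consumer index
 * (set_ci_db) and db[1] the arm sequence (arm_db). The CQ is placed on EQ
 * vector 0 and armed before returning.
 */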
498*1a86b377SEli Cohen static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
499*1a86b377SEli Cohen {
500*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
501*1a86b377SEli Cohen 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
502*1a86b377SEli Cohen 	void __iomem *uar_page = ndev->mvdev.res.uar->map;
503*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
504*1a86b377SEli Cohen 	struct mlx5_vdpa_cq *vcq = &mvq->cq;
505*1a86b377SEli Cohen 	unsigned int irqn;
506*1a86b377SEli Cohen 	__be64 *pas;
507*1a86b377SEli Cohen 	int inlen;
508*1a86b377SEli Cohen 	void *cqc;
509*1a86b377SEli Cohen 	void *in;
510*1a86b377SEli Cohen 	int err;
511*1a86b377SEli Cohen 	int eqn;
512*1a86b377SEli Cohen 
513*1a86b377SEli Cohen 	err = mlx5_db_alloc(mdev, &vcq->db);
514*1a86b377SEli Cohen 	if (err)
515*1a86b377SEli Cohen 		return err;
516*1a86b377SEli Cohen 
517*1a86b377SEli Cohen 	vcq->mcq.set_ci_db = vcq->db.db;
518*1a86b377SEli Cohen 	vcq->mcq.arm_db = vcq->db.db + 1;
519*1a86b377SEli Cohen 	vcq->mcq.cqe_sz = 64;
520*1a86b377SEli Cohen 
521*1a86b377SEli Cohen 	err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
522*1a86b377SEli Cohen 	if (err)
523*1a86b377SEli Cohen 		goto err_db;
524*1a86b377SEli Cohen 
525*1a86b377SEli Cohen 	cq_frag_buf_init(vcq, &vcq->buf);
526*1a86b377SEli Cohen 
527*1a86b377SEli Cohen 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
528*1a86b377SEli Cohen 		MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
529*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
530*1a86b377SEli Cohen 	if (!in) {
531*1a86b377SEli Cohen 		err = -ENOMEM;
532*1a86b377SEli Cohen 		goto err_vzalloc;
533*1a86b377SEli Cohen 	}
534*1a86b377SEli Cohen 
535*1a86b377SEli Cohen 	MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
536*1a86b377SEli Cohen 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
537*1a86b377SEli Cohen 	mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);
538*1a86b377SEli Cohen 
539*1a86b377SEli Cohen 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
540*1a86b377SEli Cohen 	MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
541*1a86b377SEli Cohen 
542*1a86b377SEli Cohen 	/* Use vector 0 by default. Consider adding code to choose the least used
543*1a86b377SEli Cohen 	 * vector.
544*1a86b377SEli Cohen 	 */
545*1a86b377SEli Cohen 	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
546*1a86b377SEli Cohen 	if (err)
547*1a86b377SEli Cohen 		goto err_vec;
548*1a86b377SEli Cohen 
549*1a86b377SEli Cohen 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
550*1a86b377SEli Cohen 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
551*1a86b377SEli Cohen 	MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
552*1a86b377SEli Cohen 	MLX5_SET(cqc, cqc, c_eqn, eqn);
553*1a86b377SEli Cohen 	MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
554*1a86b377SEli Cohen 
555*1a86b377SEli Cohen 	err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
556*1a86b377SEli Cohen 	if (err)
557*1a86b377SEli Cohen 		goto err_vec;
558*1a86b377SEli Cohen 
559*1a86b377SEli Cohen 	vcq->mcq.comp = mlx5_vdpa_cq_comp;
560*1a86b377SEli Cohen 	vcq->cqe = num_ent;
561*1a86b377SEli Cohen 	vcq->mcq.set_ci_db = vcq->db.db;
562*1a86b377SEli Cohen 	vcq->mcq.arm_db = vcq->db.db + 1;
563*1a86b377SEli Cohen 	mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
564*1a86b377SEli Cohen 	kfree(in);
565*1a86b377SEli Cohen 	return 0;
566*1a86b377SEli Cohen 
567*1a86b377SEli Cohen err_vec:
568*1a86b377SEli Cohen 	kfree(in);
569*1a86b377SEli Cohen err_vzalloc:
570*1a86b377SEli Cohen 	cq_frag_buf_free(ndev, &vcq->buf);
571*1a86b377SEli Cohen err_db:
572*1a86b377SEli Cohen 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
573*1a86b377SEli Cohen 	return err;
574*1a86b377SEli Cohen }
575*1a86b377SEli Cohen 
576*1a86b377SEli Cohen static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
577*1a86b377SEli Cohen {
578*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
579*1a86b377SEli Cohen 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
580*1a86b377SEli Cohen 	struct mlx5_vdpa_cq *vcq = &mvq->cq;
581*1a86b377SEli Cohen 
582*1a86b377SEli Cohen 	if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
583*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
584*1a86b377SEli Cohen 		return;
585*1a86b377SEli Cohen 	}
586*1a86b377SEli Cohen 	cq_frag_buf_free(ndev, &vcq->buf);
587*1a86b377SEli Cohen 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
588*1a86b377SEli Cohen }
589*1a86b377SEli Cohen 
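/* The device reports, per umem, a pair of sizing parameters in its vdpa
 * emulation caps. The required buffer size is then
 * size = umem_<n>_buffer_param_a * queue_size + umem_<n>_buffer_param_b,
 * which is what umem_size() computes below.
 */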
590*1a86b377SEli Cohen static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
591*1a86b377SEli Cohen 		     struct mlx5_vdpa_umem **umemp)
592*1a86b377SEli Cohen {
593*1a86b377SEli Cohen 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
594*1a86b377SEli Cohen 	int p_a;
595*1a86b377SEli Cohen 	int p_b;
596*1a86b377SEli Cohen 
597*1a86b377SEli Cohen 	switch (num) {
598*1a86b377SEli Cohen 	case 1:
599*1a86b377SEli Cohen 		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
600*1a86b377SEli Cohen 		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
601*1a86b377SEli Cohen 		*umemp = &mvq->umem1;
602*1a86b377SEli Cohen 		break;
603*1a86b377SEli Cohen 	case 2:
604*1a86b377SEli Cohen 		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
605*1a86b377SEli Cohen 		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
606*1a86b377SEli Cohen 		*umemp = &mvq->umem2;
607*1a86b377SEli Cohen 		break;
608*1a86b377SEli Cohen 	case 3:
609*1a86b377SEli Cohen 		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
610*1a86b377SEli Cohen 		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
611*1a86b377SEli Cohen 		*umemp = &mvq->umem3;
612*1a86b377SEli Cohen 		break;
613*1a86b377SEli Cohen 	}
614*1a86b377SEli Cohen 	return p_a * mvq->num_ent + p_b;
615*1a86b377SEli Cohen }
616*1a86b377SEli Cohen 
617*1a86b377SEli Cohen static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
618*1a86b377SEli Cohen {
619*1a86b377SEli Cohen 	mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
620*1a86b377SEli Cohen }
621*1a86b377SEli Cohen 
622*1a86b377SEli Cohen static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
623*1a86b377SEli Cohen {
624*1a86b377SEli Cohen 	int inlen;
625*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
626*1a86b377SEli Cohen 	void *um;
627*1a86b377SEli Cohen 	void *in;
628*1a86b377SEli Cohen 	int err;
629*1a86b377SEli Cohen 	__be64 *pas;
630*1a86b377SEli Cohen 	int size;
631*1a86b377SEli Cohen 	struct mlx5_vdpa_umem *umem;
632*1a86b377SEli Cohen 
633*1a86b377SEli Cohen 	size = umem_size(ndev, mvq, num, &umem);
634*1a86b377SEli Cohen 	if (size < 0)
635*1a86b377SEli Cohen 		return size;
636*1a86b377SEli Cohen 
637*1a86b377SEli Cohen 	umem->size = size;
638*1a86b377SEli Cohen 	err = umem_frag_buf_alloc(ndev, umem, size);
639*1a86b377SEli Cohen 	if (err)
640*1a86b377SEli Cohen 		return err;
641*1a86b377SEli Cohen 
642*1a86b377SEli Cohen 	inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;
643*1a86b377SEli Cohen 
644*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
645*1a86b377SEli Cohen 	if (!in) {
646*1a86b377SEli Cohen 		err = -ENOMEM;
647*1a86b377SEli Cohen 		goto err_in;
648*1a86b377SEli Cohen 	}
649*1a86b377SEli Cohen 
650*1a86b377SEli Cohen 	MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
651*1a86b377SEli Cohen 	MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
652*1a86b377SEli Cohen 	um = MLX5_ADDR_OF(create_umem_in, in, umem);
653*1a86b377SEli Cohen 	MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
654*1a86b377SEli Cohen 	MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);
655*1a86b377SEli Cohen 
656*1a86b377SEli Cohen 	pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
657*1a86b377SEli Cohen 	mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);
658*1a86b377SEli Cohen 
659*1a86b377SEli Cohen 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
660*1a86b377SEli Cohen 	if (err) {
661*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
662*1a86b377SEli Cohen 		goto err_cmd;
663*1a86b377SEli Cohen 	}
664*1a86b377SEli Cohen 
665*1a86b377SEli Cohen 	kfree(in);
666*1a86b377SEli Cohen 	umem->id = MLX5_GET(create_umem_out, out, umem_id);
667*1a86b377SEli Cohen 
668*1a86b377SEli Cohen 	return 0;
669*1a86b377SEli Cohen 
670*1a86b377SEli Cohen err_cmd:
671*1a86b377SEli Cohen 	kfree(in);
672*1a86b377SEli Cohen err_in:
673*1a86b377SEli Cohen 	umem_frag_buf_free(ndev, umem);
674*1a86b377SEli Cohen 	return err;
675*1a86b377SEli Cohen }
676*1a86b377SEli Cohen 
677*1a86b377SEli Cohen static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
678*1a86b377SEli Cohen {
679*1a86b377SEli Cohen 	u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
680*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
681*1a86b377SEli Cohen 	struct mlx5_vdpa_umem *umem;
682*1a86b377SEli Cohen 
683*1a86b377SEli Cohen 	switch (num) {
684*1a86b377SEli Cohen 	case 1:
685*1a86b377SEli Cohen 		umem = &mvq->umem1;
686*1a86b377SEli Cohen 		break;
687*1a86b377SEli Cohen 	case 2:
688*1a86b377SEli Cohen 		umem = &mvq->umem2;
689*1a86b377SEli Cohen 		break;
690*1a86b377SEli Cohen 	case 3:
691*1a86b377SEli Cohen 		umem = &mvq->umem3;
692*1a86b377SEli Cohen 		break;
693*1a86b377SEli Cohen 	}
694*1a86b377SEli Cohen 
695*1a86b377SEli Cohen 	MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
696*1a86b377SEli Cohen 	MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
697*1a86b377SEli Cohen 	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
698*1a86b377SEli Cohen 		return;
699*1a86b377SEli Cohen 
700*1a86b377SEli Cohen 	umem_frag_buf_free(ndev, umem);
701*1a86b377SEli Cohen }
702*1a86b377SEli Cohen 
703*1a86b377SEli Cohen static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
704*1a86b377SEli Cohen {
705*1a86b377SEli Cohen 	int num;
706*1a86b377SEli Cohen 	int err;
707*1a86b377SEli Cohen 
708*1a86b377SEli Cohen 	for (num = 1; num <= 3; num++) {
709*1a86b377SEli Cohen 		err = create_umem(ndev, mvq, num);
710*1a86b377SEli Cohen 		if (err)
711*1a86b377SEli Cohen 			goto err_umem;
712*1a86b377SEli Cohen 	}
713*1a86b377SEli Cohen 	return 0;
714*1a86b377SEli Cohen 
715*1a86b377SEli Cohen err_umem:
716*1a86b377SEli Cohen 	for (num--; num > 0; num--)
717*1a86b377SEli Cohen 		umem_destroy(ndev, mvq, num);
718*1a86b377SEli Cohen 
719*1a86b377SEli Cohen 	return err;
720*1a86b377SEli Cohen }
721*1a86b377SEli Cohen 
722*1a86b377SEli Cohen static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
723*1a86b377SEli Cohen {
724*1a86b377SEli Cohen 	int num;
725*1a86b377SEli Cohen 
726*1a86b377SEli Cohen 	for (num = 3; num > 0; num--)
727*1a86b377SEli Cohen 		umem_destroy(ndev, mvq, num);
728*1a86b377SEli Cohen }
729*1a86b377SEli Cohen 
730*1a86b377SEli Cohen static int get_queue_type(struct mlx5_vdpa_net *ndev)
731*1a86b377SEli Cohen {
732*1a86b377SEli Cohen 	u32 type_mask;
733*1a86b377SEli Cohen 
734*1a86b377SEli Cohen 	type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
735*1a86b377SEli Cohen 
736*1a86b377SEli Cohen 	/* prefer split queue */
737*1a86b377SEli Cohen 	if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
738*1a86b377SEli Cohen 		return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
739*1a86b377SEli Cohen 
740*1a86b377SEli Cohen 	WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
741*1a86b377SEli Cohen 
742*1a86b377SEli Cohen 	return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
743*1a86b377SEli Cohen }
744*1a86b377SEli Cohen 
745*1a86b377SEli Cohen static bool vq_is_tx(u16 idx)
746*1a86b377SEli Cohen {
747*1a86b377SEli Cohen 	return idx % 2;
748*1a86b377SEli Cohen }
749*1a86b377SEli Cohen 
750*1a86b377SEli Cohen static u16 get_features_12_3(u64 features)
751*1a86b377SEli Cohen {
752*1a86b377SEli Cohen 	return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) |
753*1a86b377SEli Cohen 	       (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) |
754*1a86b377SEli Cohen 	       (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) |
755*1a86b377SEli Cohen 	       (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6);
756*1a86b377SEli Cohen }
757*1a86b377SEli Cohen 
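/* Create the VIRTIO_NET_Q general object for a virtqueue. In vdpa terms,
 * desc_addr/driver_addr/device_addr map to the descriptor, available and used
 * rings respectively, and event notifications are delivered in QP mode
 * through the firmware QP created earlier.
 */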
758*1a86b377SEli Cohen static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
759*1a86b377SEli Cohen {
760*1a86b377SEli Cohen 	int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
761*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
762*1a86b377SEli Cohen 	void *obj_context;
763*1a86b377SEli Cohen 	void *cmd_hdr;
764*1a86b377SEli Cohen 	void *vq_ctx;
765*1a86b377SEli Cohen 	void *in;
766*1a86b377SEli Cohen 	int err;
767*1a86b377SEli Cohen 
768*1a86b377SEli Cohen 	err = umems_create(ndev, mvq);
769*1a86b377SEli Cohen 	if (err)
770*1a86b377SEli Cohen 		return err;
771*1a86b377SEli Cohen 
772*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
773*1a86b377SEli Cohen 	if (!in) {
774*1a86b377SEli Cohen 		err = -ENOMEM;
775*1a86b377SEli Cohen 		goto err_alloc;
776*1a86b377SEli Cohen 	}
777*1a86b377SEli Cohen 
778*1a86b377SEli Cohen 	cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
779*1a86b377SEli Cohen 
780*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
781*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
782*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
783*1a86b377SEli Cohen 
784*1a86b377SEli Cohen 	obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
785*1a86b377SEli Cohen 	MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
786*1a86b377SEli Cohen 	MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
787*1a86b377SEli Cohen 		 get_features_12_3(ndev->mvdev.actual_features));
788*1a86b377SEli Cohen 	vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
789*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));
790*1a86b377SEli Cohen 
791*1a86b377SEli Cohen 	if (vq_is_tx(mvq->index))
792*1a86b377SEli Cohen 		MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
793*1a86b377SEli Cohen 
794*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
795*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
796*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
797*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
798*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
799*1a86b377SEli Cohen 		 !!(ndev->mvdev.actual_features & BIT(VIRTIO_F_VERSION_1)));
800*1a86b377SEli Cohen 	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
801*1a86b377SEli Cohen 	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
802*1a86b377SEli Cohen 	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
803*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
804*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
805*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
806*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
807*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
808*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
809*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
810*1a86b377SEli Cohen 	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
811*1a86b377SEli Cohen 	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
812*1a86b377SEli Cohen 		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
813*1a86b377SEli Cohen 
814*1a86b377SEli Cohen 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
815*1a86b377SEli Cohen 	if (err)
816*1a86b377SEli Cohen 		goto err_cmd;
817*1a86b377SEli Cohen 
818*1a86b377SEli Cohen 	kfree(in);
819*1a86b377SEli Cohen 	mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
820*1a86b377SEli Cohen 
821*1a86b377SEli Cohen 	return 0;
822*1a86b377SEli Cohen 
823*1a86b377SEli Cohen err_cmd:
824*1a86b377SEli Cohen 	kfree(in);
825*1a86b377SEli Cohen err_alloc:
826*1a86b377SEli Cohen 	umems_destroy(ndev, mvq);
827*1a86b377SEli Cohen 	return err;
828*1a86b377SEli Cohen }
829*1a86b377SEli Cohen 
830*1a86b377SEli Cohen static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
831*1a86b377SEli Cohen {
832*1a86b377SEli Cohen 	u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
833*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
834*1a86b377SEli Cohen 
835*1a86b377SEli Cohen 	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
836*1a86b377SEli Cohen 		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
837*1a86b377SEli Cohen 	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
838*1a86b377SEli Cohen 	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
839*1a86b377SEli Cohen 	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
840*1a86b377SEli Cohen 		 MLX5_OBJ_TYPE_VIRTIO_NET_Q);
841*1a86b377SEli Cohen 	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
842*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
843*1a86b377SEli Cohen 		return;
844*1a86b377SEli Cohen 	}
845*1a86b377SEli Cohen 	umems_destroy(ndev, mvq);
846*1a86b377SEli Cohen }
847*1a86b377SEli Cohen 
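/* Helpers for modify_qp() below: get_qpn() returns the QP being modified and
 * get_rqpn() its remote peer, depending on whether the firmware side or the
 * driver side of the RC pair is being transitioned.
 */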
848*1a86b377SEli Cohen static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
849*1a86b377SEli Cohen {
850*1a86b377SEli Cohen 	return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
851*1a86b377SEli Cohen }
852*1a86b377SEli Cohen 
853*1a86b377SEli Cohen static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
854*1a86b377SEli Cohen {
855*1a86b377SEli Cohen 	return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
856*1a86b377SEli Cohen }
857*1a86b377SEli Cohen 
858*1a86b377SEli Cohen static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
859*1a86b377SEli Cohen 			int *outlen, u32 qpn, u32 rqpn)
860*1a86b377SEli Cohen {
861*1a86b377SEli Cohen 	void *qpc;
862*1a86b377SEli Cohen 	void *pp;
863*1a86b377SEli Cohen 
864*1a86b377SEli Cohen 	switch (cmd) {
865*1a86b377SEli Cohen 	case MLX5_CMD_OP_2RST_QP:
866*1a86b377SEli Cohen 		*inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
867*1a86b377SEli Cohen 		*outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
868*1a86b377SEli Cohen 		*in = kzalloc(*inlen, GFP_KERNEL);
869*1a86b377SEli Cohen 		*out = kzalloc(*outlen, GFP_KERNEL);
870*1a86b377SEli Cohen 		if (!*in || !*out)
871*1a86b377SEli Cohen 			goto outerr;
872*1a86b377SEli Cohen 
873*1a86b377SEli Cohen 		MLX5_SET(qp_2rst_in, *in, opcode, cmd);
874*1a86b377SEli Cohen 		MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
875*1a86b377SEli Cohen 		MLX5_SET(qp_2rst_in, *in, qpn, qpn);
876*1a86b377SEli Cohen 		break;
877*1a86b377SEli Cohen 	case MLX5_CMD_OP_RST2INIT_QP:
878*1a86b377SEli Cohen 		*inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
879*1a86b377SEli Cohen 		*outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
880*1a86b377SEli Cohen 		*in = kzalloc(*inlen, GFP_KERNEL);
881*1a86b377SEli Cohen 		*out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL);
882*1a86b377SEli Cohen 		if (!*in || !*out)
883*1a86b377SEli Cohen 			goto outerr;
884*1a86b377SEli Cohen 
885*1a86b377SEli Cohen 		MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
886*1a86b377SEli Cohen 		MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
887*1a86b377SEli Cohen 		MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
888*1a86b377SEli Cohen 		qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
889*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
890*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, rwe, 1);
891*1a86b377SEli Cohen 		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
892*1a86b377SEli Cohen 		MLX5_SET(ads, pp, vhca_port_num, 1);
893*1a86b377SEli Cohen 		break;
894*1a86b377SEli Cohen 	case MLX5_CMD_OP_INIT2RTR_QP:
895*1a86b377SEli Cohen 		*inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
896*1a86b377SEli Cohen 		*outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
897*1a86b377SEli Cohen 		*in = kzalloc(*inlen, GFP_KERNEL);
898*1a86b377SEli Cohen 		*out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL);
899*1a86b377SEli Cohen 		if (!*in || !*out)
900*1a86b377SEli Cohen 			goto outerr;
901*1a86b377SEli Cohen 
902*1a86b377SEli Cohen 		MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
903*1a86b377SEli Cohen 		MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
904*1a86b377SEli Cohen 		MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
905*1a86b377SEli Cohen 		qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
906*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
907*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, log_msg_max, 30);
908*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, remote_qpn, rqpn);
909*1a86b377SEli Cohen 		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
910*1a86b377SEli Cohen 		MLX5_SET(ads, pp, fl, 1);
911*1a86b377SEli Cohen 		break;
912*1a86b377SEli Cohen 	case MLX5_CMD_OP_RTR2RTS_QP:
913*1a86b377SEli Cohen 		*inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
914*1a86b377SEli Cohen 		*outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
915*1a86b377SEli Cohen 		*in = kzalloc(*inlen, GFP_KERNEL);
916*1a86b377SEli Cohen 		*out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL);
917*1a86b377SEli Cohen 		if (!*in || !*out)
918*1a86b377SEli Cohen 			goto outerr;
919*1a86b377SEli Cohen 
920*1a86b377SEli Cohen 		MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
921*1a86b377SEli Cohen 		MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
922*1a86b377SEli Cohen 		MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
923*1a86b377SEli Cohen 		qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
924*1a86b377SEli Cohen 		pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
925*1a86b377SEli Cohen 		MLX5_SET(ads, pp, ack_timeout, 14);
926*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, retry_count, 7);
927*1a86b377SEli Cohen 		MLX5_SET(qpc, qpc, rnr_retry, 7);
928*1a86b377SEli Cohen 		break;
929*1a86b377SEli Cohen 	default:
930*1a86b377SEli Cohen 		goto outerr;
931*1a86b377SEli Cohen 	}
932*1a86b377SEli Cohen 	if (!*in || !*out)
933*1a86b377SEli Cohen 		goto outerr;
934*1a86b377SEli Cohen 
935*1a86b377SEli Cohen 	return;
936*1a86b377SEli Cohen 
937*1a86b377SEli Cohen outerr:
938*1a86b377SEli Cohen 	kfree(*in);
939*1a86b377SEli Cohen 	kfree(*out);
940*1a86b377SEli Cohen 	*in = NULL;
941*1a86b377SEli Cohen 	*out = NULL;
942*1a86b377SEli Cohen }
943*1a86b377SEli Cohen 
944*1a86b377SEli Cohen static void free_inout(void *in, void *out)
945*1a86b377SEli Cohen {
946*1a86b377SEli Cohen 	kfree(in);
947*1a86b377SEli Cohen 	kfree(out);
948*1a86b377SEli Cohen }
949*1a86b377SEli Cohen 
950*1a86b377SEli Cohen /* Two QPs are used by each virtqueue. One is used by the driver and one by
951*1a86b377SEli Cohen  * firmware. The fw argument indicates whether the QP to modify is the one used
952*1a86b377SEli Cohen  * by firmware.
953*1a86b377SEli Cohen  */
954*1a86b377SEli Cohen static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
955*1a86b377SEli Cohen {
956*1a86b377SEli Cohen 	int outlen;
957*1a86b377SEli Cohen 	int inlen;
958*1a86b377SEli Cohen 	void *out;
959*1a86b377SEli Cohen 	void *in;
960*1a86b377SEli Cohen 	int err;
961*1a86b377SEli Cohen 
962*1a86b377SEli Cohen 	alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
963*1a86b377SEli Cohen 	if (!in || !out)
964*1a86b377SEli Cohen 		return -ENOMEM;
965*1a86b377SEli Cohen 
966*1a86b377SEli Cohen 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
967*1a86b377SEli Cohen 	free_inout(in, out);
968*1a86b377SEli Cohen 	return err;
969*1a86b377SEli Cohen }
970*1a86b377SEli Cohen 
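/* Bring the RC pair up by walking both QPs through the usual
 * RESET -> INIT -> RTR -> RTS sequence. Only the firmware QP is moved all the
 * way to RTS; the driver QP has no send queue and receiving only requires RTR.
 */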
971*1a86b377SEli Cohen static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
972*1a86b377SEli Cohen {
973*1a86b377SEli Cohen 	int err;
974*1a86b377SEli Cohen 
975*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
976*1a86b377SEli Cohen 	if (err)
977*1a86b377SEli Cohen 		return err;
978*1a86b377SEli Cohen 
979*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
980*1a86b377SEli Cohen 	if (err)
981*1a86b377SEli Cohen 		return err;
982*1a86b377SEli Cohen 
983*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
984*1a86b377SEli Cohen 	if (err)
985*1a86b377SEli Cohen 		return err;
986*1a86b377SEli Cohen 
987*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
988*1a86b377SEli Cohen 	if (err)
989*1a86b377SEli Cohen 		return err;
990*1a86b377SEli Cohen 
991*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
992*1a86b377SEli Cohen 	if (err)
993*1a86b377SEli Cohen 		return err;
994*1a86b377SEli Cohen 
995*1a86b377SEli Cohen 	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
996*1a86b377SEli Cohen 	if (err)
997*1a86b377SEli Cohen 		return err;
998*1a86b377SEli Cohen 
999*1a86b377SEli Cohen 	return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
1000*1a86b377SEli Cohen }
1001*1a86b377SEli Cohen 
1002*1a86b377SEli Cohen struct mlx5_virtq_attr {
1003*1a86b377SEli Cohen 	u8 state;
1004*1a86b377SEli Cohen 	u16 available_index;
1005*1a86b377SEli Cohen };
1006*1a86b377SEli Cohen 
1007*1a86b377SEli Cohen static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
1008*1a86b377SEli Cohen 			   struct mlx5_virtq_attr *attr)
1009*1a86b377SEli Cohen {
1010*1a86b377SEli Cohen 	int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
1011*1a86b377SEli Cohen 	u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
1012*1a86b377SEli Cohen 	void *out;
1013*1a86b377SEli Cohen 	void *obj_context;
1014*1a86b377SEli Cohen 	void *cmd_hdr;
1015*1a86b377SEli Cohen 	int err;
1016*1a86b377SEli Cohen 
1017*1a86b377SEli Cohen 	out = kzalloc(outlen, GFP_KERNEL);
1018*1a86b377SEli Cohen 	if (!out)
1019*1a86b377SEli Cohen 		return -ENOMEM;
1020*1a86b377SEli Cohen 
1021*1a86b377SEli Cohen 	cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
1022*1a86b377SEli Cohen 
1023*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1024*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
1025*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
1026*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1027*1a86b377SEli Cohen 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
1028*1a86b377SEli Cohen 	if (err)
1029*1a86b377SEli Cohen 		goto err_cmd;
1030*1a86b377SEli Cohen 
1031*1a86b377SEli Cohen 	obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
1032*1a86b377SEli Cohen 	memset(attr, 0, sizeof(*attr));
1033*1a86b377SEli Cohen 	attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
1034*1a86b377SEli Cohen 	attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
1035*1a86b377SEli Cohen 	kfree(out);
1036*1a86b377SEli Cohen 	return 0;
1037*1a86b377SEli Cohen 
1038*1a86b377SEli Cohen err_cmd:
1039*1a86b377SEli Cohen 	kfree(out);
1040*1a86b377SEli Cohen 	return err;
1041*1a86b377SEli Cohen }
1042*1a86b377SEli Cohen 
1043*1a86b377SEli Cohen static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
1044*1a86b377SEli Cohen {
1045*1a86b377SEli Cohen 	int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
1046*1a86b377SEli Cohen 	u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
1047*1a86b377SEli Cohen 	void *obj_context;
1048*1a86b377SEli Cohen 	void *cmd_hdr;
1049*1a86b377SEli Cohen 	void *in;
1050*1a86b377SEli Cohen 	int err;
1051*1a86b377SEli Cohen 
1052*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
1053*1a86b377SEli Cohen 	if (!in)
1054*1a86b377SEli Cohen 		return -ENOMEM;
1055*1a86b377SEli Cohen 
1056*1a86b377SEli Cohen 	cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
1057*1a86b377SEli Cohen 
1058*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
1059*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
1060*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
1061*1a86b377SEli Cohen 	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1062*1a86b377SEli Cohen 
1063*1a86b377SEli Cohen 	obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
1064*1a86b377SEli Cohen 	MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
1065*1a86b377SEli Cohen 		   MLX5_VIRTQ_MODIFY_MASK_STATE);
1066*1a86b377SEli Cohen 	MLX5_SET(virtio_net_q_object, obj_context, state, state);
1067*1a86b377SEli Cohen 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
1068*1a86b377SEli Cohen 	kfree(in);
1069*1a86b377SEli Cohen 	if (!err)
1070*1a86b377SEli Cohen 		mvq->fw_state = state;
1071*1a86b377SEli Cohen 
1072*1a86b377SEli Cohen 	return err;
1073*1a86b377SEli Cohen }
1074*1a86b377SEli Cohen 
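/* Set up everything a virtqueue needs, in order: completion queue, firmware
 * and driver QPs, connect the QP pair, create the virtqueue object and, if
 * the vq was already marked ready, move it to the RDY state.
 */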
1075*1a86b377SEli Cohen static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1076*1a86b377SEli Cohen {
1077*1a86b377SEli Cohen 	u16 idx = mvq->index;
1078*1a86b377SEli Cohen 	int err;
1079*1a86b377SEli Cohen 
1080*1a86b377SEli Cohen 	if (!mvq->num_ent)
1081*1a86b377SEli Cohen 		return 0;
1082*1a86b377SEli Cohen 
1083*1a86b377SEli Cohen 	if (mvq->initialized) {
1084*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
1085*1a86b377SEli Cohen 		return -EINVAL;
1086*1a86b377SEli Cohen 	}
1087*1a86b377SEli Cohen 
1088*1a86b377SEli Cohen 	err = cq_create(ndev, idx, mvq->num_ent);
1089*1a86b377SEli Cohen 	if (err)
1090*1a86b377SEli Cohen 		return err;
1091*1a86b377SEli Cohen 
1092*1a86b377SEli Cohen 	err = qp_create(ndev, mvq, &mvq->fwqp);
1093*1a86b377SEli Cohen 	if (err)
1094*1a86b377SEli Cohen 		goto err_fwqp;
1095*1a86b377SEli Cohen 
1096*1a86b377SEli Cohen 	err = qp_create(ndev, mvq, &mvq->vqqp);
1097*1a86b377SEli Cohen 	if (err)
1098*1a86b377SEli Cohen 		goto err_vqqp;
1099*1a86b377SEli Cohen 
1100*1a86b377SEli Cohen 	err = connect_qps(ndev, mvq);
1101*1a86b377SEli Cohen 	if (err)
1102*1a86b377SEli Cohen 		goto err_connect;
1103*1a86b377SEli Cohen 
1104*1a86b377SEli Cohen 	err = create_virtqueue(ndev, mvq);
1105*1a86b377SEli Cohen 	if (err)
1106*1a86b377SEli Cohen 		goto err_connect;
1107*1a86b377SEli Cohen 
1108*1a86b377SEli Cohen 	if (mvq->ready) {
1109*1a86b377SEli Cohen 		err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
1110*1a86b377SEli Cohen 		if (err) {
1111*1a86b377SEli Cohen 			mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
1112*1a86b377SEli Cohen 				       idx, err);
1113*1a86b377SEli Cohen 			goto err_modify;
1114*1a86b377SEli Cohen 		}
1115*1a86b377SEli Cohen 	}
1116*1a86b377SEli Cohen 
1117*1a86b377SEli Cohen 	mvq->initialized = true;
1118*1a86b377SEli Cohen 	return 0;
1119*1a86b377SEli Cohen 
err_modify:
	destroy_virtqueue(ndev, mvq);
1120*1a86b377SEli Cohen err_connect:
1121*1a86b377SEli Cohen 	qp_destroy(ndev, &mvq->vqqp);
1122*1a86b377SEli Cohen err_vqqp:
1123*1a86b377SEli Cohen 	qp_destroy(ndev, &mvq->fwqp);
1124*1a86b377SEli Cohen err_fwqp:
1125*1a86b377SEli Cohen 	cq_destroy(ndev, idx);
1126*1a86b377SEli Cohen 	return err;
1127*1a86b377SEli Cohen }
1128*1a86b377SEli Cohen 
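/*
 * Suspend an initialized virtqueue: verify the firmware object is still
 * queryable, then, if the cached state is RDY, modify it to SUSPEND.
 */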
1129*1a86b377SEli Cohen static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1130*1a86b377SEli Cohen {
1131*1a86b377SEli Cohen 	struct mlx5_virtq_attr attr;
1132*1a86b377SEli Cohen 
1133*1a86b377SEli Cohen 	if (!mvq->initialized)
1134*1a86b377SEli Cohen 		return;
1135*1a86b377SEli Cohen 
1136*1a86b377SEli Cohen 	if (query_virtqueue(ndev, mvq, &attr)) {
1137*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
1138*1a86b377SEli Cohen 		return;
1139*1a86b377SEli Cohen 	}
1140*1a86b377SEli Cohen 	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
1141*1a86b377SEli Cohen 		return;
1142*1a86b377SEli Cohen 
1143*1a86b377SEli Cohen 	if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
1144*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
1145*1a86b377SEli Cohen }
1146*1a86b377SEli Cohen 
1147*1a86b377SEli Cohen static void suspend_vqs(struct mlx5_vdpa_net *ndev)
1148*1a86b377SEli Cohen {
1149*1a86b377SEli Cohen 	int i;
1150*1a86b377SEli Cohen 
1151*1a86b377SEli Cohen 	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
1152*1a86b377SEli Cohen 		suspend_vq(ndev, &ndev->vqs[i]);
1153*1a86b377SEli Cohen }
1154*1a86b377SEli Cohen 
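/*
 * Release all hardware resources of an initialized virtqueue: suspend it,
 * destroy the virtqueue object, both QPs and the completion queue, then
 * clear the initialized flag.
 */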
1155*1a86b377SEli Cohen static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1156*1a86b377SEli Cohen {
1157*1a86b377SEli Cohen 	if (!mvq->initialized)
1158*1a86b377SEli Cohen 		return;
1159*1a86b377SEli Cohen 
1160*1a86b377SEli Cohen 	suspend_vq(ndev, mvq);
1161*1a86b377SEli Cohen 	destroy_virtqueue(ndev, mvq);
1162*1a86b377SEli Cohen 	qp_destroy(ndev, &mvq->vqqp);
1163*1a86b377SEli Cohen 	qp_destroy(ndev, &mvq->fwqp);
1164*1a86b377SEli Cohen 	cq_destroy(ndev, mvq->index);
1165*1a86b377SEli Cohen 	mvq->initialized = false;
1166*1a86b377SEli Cohen }
1167*1a86b377SEli Cohen 
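/*
 * Create an RQ table that indirects received traffic to the receive
 * virtqueues. The table is created with room for two entries and a single
 * active entry; only initialized, non-TX virtqueues are added to the list.
 */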
1168*1a86b377SEli Cohen static int create_rqt(struct mlx5_vdpa_net *ndev)
1169*1a86b377SEli Cohen {
1170*1a86b377SEli Cohen 	int log_max_rqt;
1171*1a86b377SEli Cohen 	__be32 *list;
1172*1a86b377SEli Cohen 	void *rqtc;
1173*1a86b377SEli Cohen 	int inlen;
1174*1a86b377SEli Cohen 	void *in;
1175*1a86b377SEli Cohen 	int i, j;
1176*1a86b377SEli Cohen 	int err;
1177*1a86b377SEli Cohen 
1178*1a86b377SEli Cohen 	log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
1179*1a86b377SEli Cohen 	if (log_max_rqt < 1)
1180*1a86b377SEli Cohen 		return -EOPNOTSUPP;
1181*1a86b377SEli Cohen 
1182*1a86b377SEli Cohen 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
1183*1a86b377SEli Cohen 	in = kzalloc(inlen, GFP_KERNEL);
1184*1a86b377SEli Cohen 	if (!in)
1185*1a86b377SEli Cohen 		return -ENOMEM;
1186*1a86b377SEli Cohen 
1187*1a86b377SEli Cohen 	MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
1188*1a86b377SEli Cohen 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1189*1a86b377SEli Cohen 
1190*1a86b377SEli Cohen 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
1191*1a86b377SEli Cohen 	MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
1192*1a86b377SEli Cohen 	MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
1193*1a86b377SEli Cohen 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
1194*1a86b377SEli Cohen 	for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
1195*1a86b377SEli Cohen 		if (!ndev->vqs[j].initialized)
1196*1a86b377SEli Cohen 			continue;
1197*1a86b377SEli Cohen 
1198*1a86b377SEli Cohen 		if (!vq_is_tx(ndev->vqs[j].index)) {
1199*1a86b377SEli Cohen 			list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
1200*1a86b377SEli Cohen 			i++;
1201*1a86b377SEli Cohen 		}
1202*1a86b377SEli Cohen 	}
1203*1a86b377SEli Cohen 
1204*1a86b377SEli Cohen 	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
1205*1a86b377SEli Cohen 	kfree(in);
1206*1a86b377SEli Cohen 	if (err)
1207*1a86b377SEli Cohen 		return err;
1208*1a86b377SEli Cohen 
1209*1a86b377SEli Cohen 	return 0;
1210*1a86b377SEli Cohen }
1211*1a86b377SEli Cohen 
1212*1a86b377SEli Cohen static void destroy_rqt(struct mlx5_vdpa_net *ndev)
1213*1a86b377SEli Cohen {
1214*1a86b377SEli Cohen 	mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
1215*1a86b377SEli Cohen }
1216*1a86b377SEli Cohen 
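/*
 * Create a TIR that spreads traffic over the RQ table using a symmetric
 * Toeplitz hash with a fixed key, computed over the outer IPv4 addresses
 * and TCP ports.
 */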
1217*1a86b377SEli Cohen static int create_tir(struct mlx5_vdpa_net *ndev)
1218*1a86b377SEli Cohen {
1219*1a86b377SEli Cohen #define HASH_IP_L4PORTS                                                                            \
1220*1a86b377SEli Cohen 	(MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT |  \
1221*1a86b377SEli Cohen 	 MLX5_HASH_FIELD_SEL_L4_DPORT)
1222*1a86b377SEli Cohen 	static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
1223*1a86b377SEli Cohen 						   0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
1224*1a86b377SEli Cohen 						   0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
1225*1a86b377SEli Cohen 						   0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
1226*1a86b377SEli Cohen 						   0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
1227*1a86b377SEli Cohen 	void *rss_key;
1228*1a86b377SEli Cohen 	void *outer;
1229*1a86b377SEli Cohen 	void *tirc;
1230*1a86b377SEli Cohen 	void *in;
1231*1a86b377SEli Cohen 	int err;
1232*1a86b377SEli Cohen 
1233*1a86b377SEli Cohen 	in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
1234*1a86b377SEli Cohen 	if (!in)
1235*1a86b377SEli Cohen 		return -ENOMEM;
1236*1a86b377SEli Cohen 
1237*1a86b377SEli Cohen 	MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
1238*1a86b377SEli Cohen 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1239*1a86b377SEli Cohen 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
1240*1a86b377SEli Cohen 
1241*1a86b377SEli Cohen 	MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1242*1a86b377SEli Cohen 	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1243*1a86b377SEli Cohen 	rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
1244*1a86b377SEli Cohen 	memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));
1245*1a86b377SEli Cohen 
1246*1a86b377SEli Cohen 	outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1247*1a86b377SEli Cohen 	MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
1248*1a86b377SEli Cohen 	MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
1249*1a86b377SEli Cohen 	MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);
1250*1a86b377SEli Cohen 
1251*1a86b377SEli Cohen 	MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
1252*1a86b377SEli Cohen 	MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);
1253*1a86b377SEli Cohen 
1254*1a86b377SEli Cohen 	err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
1255*1a86b377SEli Cohen 	kfree(in);
1256*1a86b377SEli Cohen 	return err;
1257*1a86b377SEli Cohen }
1258*1a86b377SEli Cohen 
1259*1a86b377SEli Cohen static void destroy_tir(struct mlx5_vdpa_net *ndev)
1260*1a86b377SEli Cohen {
1261*1a86b377SEli Cohen 	mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
1262*1a86b377SEli Cohen }
1263*1a86b377SEli Cohen 
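/*
 * Install a single catch-all flow rule in the bypass namespace that
 * forwards all received packets to the TIR and counts them with a flow
 * counter.
 */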
1264*1a86b377SEli Cohen static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
1265*1a86b377SEli Cohen {
1266*1a86b377SEli Cohen 	struct mlx5_flow_destination dest[2] = {};
1267*1a86b377SEli Cohen 	struct mlx5_flow_table_attr ft_attr = {};
1268*1a86b377SEli Cohen 	struct mlx5_flow_act flow_act = {};
1269*1a86b377SEli Cohen 	struct mlx5_flow_namespace *ns;
1270*1a86b377SEli Cohen 	int err;
1271*1a86b377SEli Cohen 
1272*1a86b377SEli Cohen 	/* for now, one entry, match all, forward to tir */
1273*1a86b377SEli Cohen 	ft_attr.max_fte = 1;
1274*1a86b377SEli Cohen 	ft_attr.autogroup.max_num_groups = 1;
1275*1a86b377SEli Cohen 
1276*1a86b377SEli Cohen 	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
1277*1a86b377SEli Cohen 	if (!ns) {
1278*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
1279*1a86b377SEli Cohen 		return -EOPNOTSUPP;
1280*1a86b377SEli Cohen 	}
1281*1a86b377SEli Cohen 
1282*1a86b377SEli Cohen 	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
1283*1a86b377SEli Cohen 	if (IS_ERR(ndev->rxft))
1284*1a86b377SEli Cohen 		return PTR_ERR(ndev->rxft);
1285*1a86b377SEli Cohen 
1286*1a86b377SEli Cohen 	ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
1287*1a86b377SEli Cohen 	if (IS_ERR(ndev->rx_counter)) {
1288*1a86b377SEli Cohen 		err = PTR_ERR(ndev->rx_counter);
1289*1a86b377SEli Cohen 		goto err_fc;
1290*1a86b377SEli Cohen 	}
1291*1a86b377SEli Cohen 
1292*1a86b377SEli Cohen 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1293*1a86b377SEli Cohen 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1294*1a86b377SEli Cohen 	dest[0].tir_num = ndev->res.tirn;
1295*1a86b377SEli Cohen 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1296*1a86b377SEli Cohen 	dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
1297*1a86b377SEli Cohen 	ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
1298*1a86b377SEli Cohen 	if (IS_ERR(ndev->rx_rule)) {
1299*1a86b377SEli Cohen 		err = PTR_ERR(ndev->rx_rule);
1300*1a86b377SEli Cohen 		ndev->rx_rule = NULL;
1301*1a86b377SEli Cohen 		goto err_rule;
1302*1a86b377SEli Cohen 	}
1303*1a86b377SEli Cohen 
1304*1a86b377SEli Cohen 	return 0;
1305*1a86b377SEli Cohen 
1306*1a86b377SEli Cohen err_rule:
1307*1a86b377SEli Cohen 	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
1308*1a86b377SEli Cohen err_fc:
1309*1a86b377SEli Cohen 	mlx5_destroy_flow_table(ndev->rxft);
1310*1a86b377SEli Cohen 	return err;
1311*1a86b377SEli Cohen }
1312*1a86b377SEli Cohen 
1313*1a86b377SEli Cohen static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
1314*1a86b377SEli Cohen {
1315*1a86b377SEli Cohen 	if (!ndev->rx_rule)
1316*1a86b377SEli Cohen 		return;
1317*1a86b377SEli Cohen 
1318*1a86b377SEli Cohen 	mlx5_del_flow_rules(ndev->rx_rule);
1319*1a86b377SEli Cohen 	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
1320*1a86b377SEli Cohen 	mlx5_destroy_flow_table(ndev->rxft);
1321*1a86b377SEli Cohen 
1322*1a86b377SEli Cohen 	ndev->rx_rule = NULL;
1323*1a86b377SEli Cohen }
1324*1a86b377SEli Cohen 
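/*
 * Notify the device of new available buffers by writing the virtqueue
 * index to the kick address, provided the queue has been marked ready.
 */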
1325*1a86b377SEli Cohen static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
1326*1a86b377SEli Cohen {
1327*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1328*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1329*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1330*1a86b377SEli Cohen 
1331*1a86b377SEli Cohen 	if (unlikely(!mvq->ready))
1332*1a86b377SEli Cohen 		return;
1333*1a86b377SEli Cohen 
1334*1a86b377SEli Cohen 	iowrite16(idx, ndev->mvdev.res.kick_addr);
1335*1a86b377SEli Cohen }
1336*1a86b377SEli Cohen 
1337*1a86b377SEli Cohen static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
1338*1a86b377SEli Cohen 				    u64 driver_area, u64 device_area)
1339*1a86b377SEli Cohen {
1340*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1341*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1342*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1343*1a86b377SEli Cohen 
1344*1a86b377SEli Cohen 	mvq->desc_addr = desc_area;
1345*1a86b377SEli Cohen 	mvq->device_addr = device_area;
1346*1a86b377SEli Cohen 	mvq->driver_addr = driver_area;
1347*1a86b377SEli Cohen 	return 0;
1348*1a86b377SEli Cohen }
1349*1a86b377SEli Cohen 
1350*1a86b377SEli Cohen static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
1351*1a86b377SEli Cohen {
1352*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1353*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1354*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq;
1355*1a86b377SEli Cohen 
1356*1a86b377SEli Cohen 	mvq = &ndev->vqs[idx];
1357*1a86b377SEli Cohen 	mvq->num_ent = num;
1358*1a86b377SEli Cohen }
1359*1a86b377SEli Cohen 
1360*1a86b377SEli Cohen static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
1361*1a86b377SEli Cohen {
1362*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1363*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1364*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
1365*1a86b377SEli Cohen 
1366*1a86b377SEli Cohen 	vq->event_cb = *cb;
1367*1a86b377SEli Cohen }
1368*1a86b377SEli Cohen 
1369*1a86b377SEli Cohen static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
1370*1a86b377SEli Cohen {
1371*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1372*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1373*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1374*1a86b377SEli Cohen 
1375*1a86b377SEli Cohen 	if (!ready)
1376*1a86b377SEli Cohen 		suspend_vq(ndev, mvq);
1377*1a86b377SEli Cohen 
1378*1a86b377SEli Cohen 	mvq->ready = ready;
1379*1a86b377SEli Cohen }
1380*1a86b377SEli Cohen 
1381*1a86b377SEli Cohen static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
1382*1a86b377SEli Cohen {
1383*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1384*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1385*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1386*1a86b377SEli Cohen 
1387*1a86b377SEli Cohen 	return mvq->ready;
1388*1a86b377SEli Cohen }
1389*1a86b377SEli Cohen 
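/*
 * Restoring the available index is only allowed while the firmware
 * virtqueue object is not in the RDY state.
 */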
1390*1a86b377SEli Cohen static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
1391*1a86b377SEli Cohen 				  const struct vdpa_vq_state *state)
1392*1a86b377SEli Cohen {
1393*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1394*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1395*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1396*1a86b377SEli Cohen 
1397*1a86b377SEli Cohen 	if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
1398*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "can't modify available index\n");
1399*1a86b377SEli Cohen 		return -EINVAL;
1400*1a86b377SEli Cohen 	}
1401*1a86b377SEli Cohen 
1402*1a86b377SEli Cohen 	mvq->avail_idx = state->avail_index;
1403*1a86b377SEli Cohen 	return 0;
1404*1a86b377SEli Cohen }
1405*1a86b377SEli Cohen 
1406*1a86b377SEli Cohen static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
1407*1a86b377SEli Cohen {
1408*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1409*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1410*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
1411*1a86b377SEli Cohen 	struct mlx5_virtq_attr attr;
1412*1a86b377SEli Cohen 	int err;
1413*1a86b377SEli Cohen 
1414*1a86b377SEli Cohen 	if (!mvq->initialized)
1415*1a86b377SEli Cohen 		return -EAGAIN;
1416*1a86b377SEli Cohen 
1417*1a86b377SEli Cohen 	err = query_virtqueue(ndev, mvq, &attr);
1418*1a86b377SEli Cohen 	if (err) {
1419*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
1420*1a86b377SEli Cohen 		return err;
1421*1a86b377SEli Cohen 	}
1422*1a86b377SEli Cohen 	state->avail_index = attr.available_index;
1423*1a86b377SEli Cohen 	return 0;
1424*1a86b377SEli Cohen }
1425*1a86b377SEli Cohen 
1426*1a86b377SEli Cohen static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
1427*1a86b377SEli Cohen {
1428*1a86b377SEli Cohen 	return PAGE_SIZE;
1429*1a86b377SEli Cohen }
1430*1a86b377SEli Cohen 
1431*1a86b377SEli Cohen enum {
	MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
1432*1a86b377SEli Cohen 	MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
1433*1a86b377SEli Cohen 	MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
1434*1a86b377SEli Cohen 	MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
1435*1a86b377SEli Cohen };
1436*1a86b377SEli Cohen 
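/*
 * Translate the device feature bits reported by the mlx5 capability into
 * virtio-net feature bits.
 */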
1437*1a86b377SEli Cohen static u64 mlx_to_virtio_features(u16 dev_features)
1438*1a86b377SEli Cohen {
1439*1a86b377SEli Cohen 	u64 result = 0;
1440*1a86b377SEli Cohen 
1441*1a86b377SEli Cohen 	if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
1442*1a86b377SEli Cohen 		result |= BIT(VIRTIO_NET_F_GUEST_CSUM);
1443*1a86b377SEli Cohen 	if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
1444*1a86b377SEli Cohen 		result |= BIT(VIRTIO_NET_F_CSUM);
1445*1a86b377SEli Cohen 	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
1446*1a86b377SEli Cohen 		result |= BIT(VIRTIO_NET_F_HOST_TSO6);
1447*1a86b377SEli Cohen 	if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
1448*1a86b377SEli Cohen 		result |= BIT(VIRTIO_NET_F_HOST_TSO4);
1449*1a86b377SEli Cohen 
1450*1a86b377SEli Cohen 	return result;
1451*1a86b377SEli Cohen }
1452*1a86b377SEli Cohen 
1453*1a86b377SEli Cohen static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
1454*1a86b377SEli Cohen {
1455*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1456*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1457*1a86b377SEli Cohen 	u16 dev_features;
1458*1a86b377SEli Cohen 
1459*1a86b377SEli Cohen 	dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
1460*1a86b377SEli Cohen 	ndev->mvdev.mlx_features = mlx_to_virtio_features(dev_features);
1461*1a86b377SEli Cohen 	if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
1462*1a86b377SEli Cohen 		ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1);
1463*1a86b377SEli Cohen 	ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM);
1464*1a86b377SEli Cohen 	print_features(mvdev, ndev->mvdev.mlx_features, false);
1465*1a86b377SEli Cohen 	return ndev->mvdev.mlx_features;
1466*1a86b377SEli Cohen }
1467*1a86b377SEli Cohen 
1468*1a86b377SEli Cohen static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
1469*1a86b377SEli Cohen {
1470*1a86b377SEli Cohen 	if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM)))
1471*1a86b377SEli Cohen 		return -EOPNOTSUPP;
1472*1a86b377SEli Cohen 
1473*1a86b377SEli Cohen 	return 0;
1474*1a86b377SEli Cohen }
1475*1a86b377SEli Cohen 
1476*1a86b377SEli Cohen static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
1477*1a86b377SEli Cohen {
1478*1a86b377SEli Cohen 	int err;
1479*1a86b377SEli Cohen 	int i;
1480*1a86b377SEli Cohen 
1481*1a86b377SEli Cohen 	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
1482*1a86b377SEli Cohen 		err = setup_vq(ndev, &ndev->vqs[i]);
1483*1a86b377SEli Cohen 		if (err)
1484*1a86b377SEli Cohen 			goto err_vq;
1485*1a86b377SEli Cohen 	}
1486*1a86b377SEli Cohen 
1487*1a86b377SEli Cohen 	return 0;
1488*1a86b377SEli Cohen 
1489*1a86b377SEli Cohen err_vq:
1490*1a86b377SEli Cohen 	for (--i; i >= 0; i--)
1491*1a86b377SEli Cohen 		teardown_vq(ndev, &ndev->vqs[i]);
1492*1a86b377SEli Cohen 
1493*1a86b377SEli Cohen 	return err;
1494*1a86b377SEli Cohen }
1495*1a86b377SEli Cohen 
1496*1a86b377SEli Cohen static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
1497*1a86b377SEli Cohen {
1498*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq;
1499*1a86b377SEli Cohen 	int i;
1500*1a86b377SEli Cohen 
1501*1a86b377SEli Cohen 	for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
1502*1a86b377SEli Cohen 		mvq = &ndev->vqs[i];
1503*1a86b377SEli Cohen 		if (!mvq->initialized)
1504*1a86b377SEli Cohen 			continue;
1505*1a86b377SEli Cohen 
1506*1a86b377SEli Cohen 		teardown_vq(ndev, mvq);
1507*1a86b377SEli Cohen 	}
1508*1a86b377SEli Cohen }
1509*1a86b377SEli Cohen 
1510*1a86b377SEli Cohen static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
1511*1a86b377SEli Cohen {
1512*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1513*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1514*1a86b377SEli Cohen 	int err;
1515*1a86b377SEli Cohen 
1516*1a86b377SEli Cohen 	print_features(mvdev, features, true);
1517*1a86b377SEli Cohen 
1518*1a86b377SEli Cohen 	err = verify_min_features(mvdev, features);
1519*1a86b377SEli Cohen 	if (err)
1520*1a86b377SEli Cohen 		return err;
1521*1a86b377SEli Cohen 
1522*1a86b377SEli Cohen 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
1523*1a86b377SEli Cohen 	return 0;
1524*1a86b377SEli Cohen }
1525*1a86b377SEli Cohen 
1526*1a86b377SEli Cohen static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
1527*1a86b377SEli Cohen {
1528*1a86b377SEli Cohen 	/* not implemented */
1529*1a86b377SEli Cohen 	mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
1530*1a86b377SEli Cohen }
1531*1a86b377SEli Cohen 
1532*1a86b377SEli Cohen #define MLX5_VDPA_MAX_VQ_ENTRIES 256
1533*1a86b377SEli Cohen static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
1534*1a86b377SEli Cohen {
1535*1a86b377SEli Cohen 	return MLX5_VDPA_MAX_VQ_ENTRIES;
1536*1a86b377SEli Cohen }
1537*1a86b377SEli Cohen 
1538*1a86b377SEli Cohen static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
1539*1a86b377SEli Cohen {
1540*1a86b377SEli Cohen 	return VIRTIO_ID_NET;
1541*1a86b377SEli Cohen }
1542*1a86b377SEli Cohen 
1543*1a86b377SEli Cohen static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
1544*1a86b377SEli Cohen {
1545*1a86b377SEli Cohen 	return PCI_VENDOR_ID_MELLANOX;
1546*1a86b377SEli Cohen }
1547*1a86b377SEli Cohen 
1548*1a86b377SEli Cohen static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
1549*1a86b377SEli Cohen {
1550*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1551*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1552*1a86b377SEli Cohen 
1553*1a86b377SEli Cohen 	print_status(mvdev, ndev->mvdev.status, false);
1554*1a86b377SEli Cohen 	return ndev->mvdev.status;
1555*1a86b377SEli Cohen }
1556*1a86b377SEli Cohen 
1557*1a86b377SEli Cohen static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1558*1a86b377SEli Cohen {
1559*1a86b377SEli Cohen 	struct mlx5_vq_restore_info *ri = &mvq->ri;
1560*1a86b377SEli Cohen 	struct mlx5_virtq_attr attr;
1561*1a86b377SEli Cohen 	int err;
1562*1a86b377SEli Cohen 
1563*1a86b377SEli Cohen 	if (!mvq->initialized)
1564*1a86b377SEli Cohen 		return 0;
1565*1a86b377SEli Cohen 
1566*1a86b377SEli Cohen 	err = query_virtqueue(ndev, mvq, &attr);
1567*1a86b377SEli Cohen 	if (err)
1568*1a86b377SEli Cohen 		return err;
1569*1a86b377SEli Cohen 
1570*1a86b377SEli Cohen 	ri->avail_index = attr.available_index;
1571*1a86b377SEli Cohen 	ri->ready = mvq->ready;
1572*1a86b377SEli Cohen 	ri->num_ent = mvq->num_ent;
1573*1a86b377SEli Cohen 	ri->desc_addr = mvq->desc_addr;
1574*1a86b377SEli Cohen 	ri->device_addr = mvq->device_addr;
1575*1a86b377SEli Cohen 	ri->driver_addr = mvq->driver_addr;
1576*1a86b377SEli Cohen 	ri->cb = mvq->event_cb;
1577*1a86b377SEli Cohen 	ri->restore = true;
1578*1a86b377SEli Cohen 	return 0;
1579*1a86b377SEli Cohen }
1580*1a86b377SEli Cohen 
1581*1a86b377SEli Cohen static int save_channels_info(struct mlx5_vdpa_net *ndev)
1582*1a86b377SEli Cohen {
1583*1a86b377SEli Cohen 	int i;
1584*1a86b377SEli Cohen 
1585*1a86b377SEli Cohen 	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
1586*1a86b377SEli Cohen 		memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
1587*1a86b377SEli Cohen 		save_channel_info(ndev, &ndev->vqs[i]);
1588*1a86b377SEli Cohen 	}
1589*1a86b377SEli Cohen 	return 0;
1590*1a86b377SEli Cohen }
1591*1a86b377SEli Cohen 
1592*1a86b377SEli Cohen static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
1593*1a86b377SEli Cohen {
1594*1a86b377SEli Cohen 	int i;
1595*1a86b377SEli Cohen 
1596*1a86b377SEli Cohen 	for (i = 0; i < ndev->mvdev.max_vqs; i++)
1597*1a86b377SEli Cohen 		memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
1598*1a86b377SEli Cohen }
1599*1a86b377SEli Cohen 
1600*1a86b377SEli Cohen static void restore_channels_info(struct mlx5_vdpa_net *ndev)
1601*1a86b377SEli Cohen {
1602*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq;
1603*1a86b377SEli Cohen 	struct mlx5_vq_restore_info *ri;
1604*1a86b377SEli Cohen 	int i;
1605*1a86b377SEli Cohen 
1606*1a86b377SEli Cohen 	mlx5_clear_vqs(ndev);
1607*1a86b377SEli Cohen 	init_mvqs(ndev);
1608*1a86b377SEli Cohen 	for (i = 0; i < ndev->mvdev.max_vqs; i++) {
1609*1a86b377SEli Cohen 		mvq = &ndev->vqs[i];
1610*1a86b377SEli Cohen 		ri = &mvq->ri;
1611*1a86b377SEli Cohen 		if (!ri->restore)
1612*1a86b377SEli Cohen 			continue;
1613*1a86b377SEli Cohen 
1614*1a86b377SEli Cohen 		mvq->avail_idx = ri->avail_index;
1615*1a86b377SEli Cohen 		mvq->ready = ri->ready;
1616*1a86b377SEli Cohen 		mvq->num_ent = ri->num_ent;
1617*1a86b377SEli Cohen 		mvq->desc_addr = ri->desc_addr;
1618*1a86b377SEli Cohen 		mvq->device_addr = ri->device_addr;
1619*1a86b377SEli Cohen 		mvq->driver_addr = ri->driver_addr;
1620*1a86b377SEli Cohen 		mvq->event_cb = ri->cb;
1621*1a86b377SEli Cohen 	}
1622*1a86b377SEli Cohen }
1623*1a86b377SEli Cohen 
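/*
 * Rebuild the datapath around a new IOTLB mapping: suspend the virtqueues,
 * record enough state to restore them, tear down the driver resources,
 * re-create the memory key and then restore the virtqueues.
 */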
1624*1a86b377SEli Cohen static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
1625*1a86b377SEli Cohen {
1626*1a86b377SEli Cohen 	int err;
1627*1a86b377SEli Cohen 
1628*1a86b377SEli Cohen 	suspend_vqs(ndev);
1629*1a86b377SEli Cohen 	err = save_channels_info(ndev);
1630*1a86b377SEli Cohen 	if (err)
1631*1a86b377SEli Cohen 		goto err_mr;
1632*1a86b377SEli Cohen 
1633*1a86b377SEli Cohen 	teardown_driver(ndev);
1634*1a86b377SEli Cohen 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
1635*1a86b377SEli Cohen 	err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
1636*1a86b377SEli Cohen 	if (err)
1637*1a86b377SEli Cohen 		goto err_mr;
1638*1a86b377SEli Cohen 
1639*1a86b377SEli Cohen 	restore_channels_info(ndev);
1640*1a86b377SEli Cohen 	err = setup_driver(ndev);
1641*1a86b377SEli Cohen 	if (err)
1642*1a86b377SEli Cohen 		goto err_setup;
1643*1a86b377SEli Cohen 
1644*1a86b377SEli Cohen 	return 0;
1645*1a86b377SEli Cohen 
1646*1a86b377SEli Cohen err_setup:
1647*1a86b377SEli Cohen 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
1648*1a86b377SEli Cohen err_mr:
1649*1a86b377SEli Cohen 	return err;
1650*1a86b377SEli Cohen }
1651*1a86b377SEli Cohen 
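/*
 * Create the datapath resources (virtqueues, RQT, TIR and the forwarding
 * rule) under the resource lock. Called when the driver sets DRIVER_OK or
 * when the memory map changes.
 */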
1652*1a86b377SEli Cohen static int setup_driver(struct mlx5_vdpa_net *ndev)
1653*1a86b377SEli Cohen {
1654*1a86b377SEli Cohen 	int err;
1655*1a86b377SEli Cohen 
1656*1a86b377SEli Cohen 	mutex_lock(&ndev->reslock);
1657*1a86b377SEli Cohen 	if (ndev->setup) {
1658*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "setup_driver called when the driver is already set up\n");
1659*1a86b377SEli Cohen 		err = 0;
1660*1a86b377SEli Cohen 		goto out;
1661*1a86b377SEli Cohen 	}
1662*1a86b377SEli Cohen 	err = setup_virtqueues(ndev);
1663*1a86b377SEli Cohen 	if (err) {
1664*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
1665*1a86b377SEli Cohen 		goto out;
1666*1a86b377SEli Cohen 	}
1667*1a86b377SEli Cohen 
1668*1a86b377SEli Cohen 	err = create_rqt(ndev);
1669*1a86b377SEli Cohen 	if (err) {
1670*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
1671*1a86b377SEli Cohen 		goto err_rqt;
1672*1a86b377SEli Cohen 	}
1673*1a86b377SEli Cohen 
1674*1a86b377SEli Cohen 	err = create_tir(ndev);
1675*1a86b377SEli Cohen 	if (err) {
1676*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
1677*1a86b377SEli Cohen 		goto err_tir;
1678*1a86b377SEli Cohen 	}
1679*1a86b377SEli Cohen 
1680*1a86b377SEli Cohen 	err = add_fwd_to_tir(ndev);
1681*1a86b377SEli Cohen 	if (err) {
1682*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
1683*1a86b377SEli Cohen 		goto err_fwd;
1684*1a86b377SEli Cohen 	}
1685*1a86b377SEli Cohen 	ndev->setup = true;
1686*1a86b377SEli Cohen 	mutex_unlock(&ndev->reslock);
1687*1a86b377SEli Cohen 
1688*1a86b377SEli Cohen 	return 0;
1689*1a86b377SEli Cohen 
1690*1a86b377SEli Cohen err_fwd:
1691*1a86b377SEli Cohen 	destroy_tir(ndev);
1692*1a86b377SEli Cohen err_tir:
1693*1a86b377SEli Cohen 	destroy_rqt(ndev);
1694*1a86b377SEli Cohen err_rqt:
1695*1a86b377SEli Cohen 	teardown_virtqueues(ndev);
1696*1a86b377SEli Cohen out:
1697*1a86b377SEli Cohen 	mutex_unlock(&ndev->reslock);
1698*1a86b377SEli Cohen 	return err;
1699*1a86b377SEli Cohen }
1700*1a86b377SEli Cohen 
1701*1a86b377SEli Cohen static void teardown_driver(struct mlx5_vdpa_net *ndev)
1702*1a86b377SEli Cohen {
1703*1a86b377SEli Cohen 	mutex_lock(&ndev->reslock);
1704*1a86b377SEli Cohen 	if (!ndev->setup)
1705*1a86b377SEli Cohen 		goto out;
1706*1a86b377SEli Cohen 
1707*1a86b377SEli Cohen 	remove_fwd_to_tir(ndev);
1708*1a86b377SEli Cohen 	destroy_tir(ndev);
1709*1a86b377SEli Cohen 	destroy_rqt(ndev);
1710*1a86b377SEli Cohen 	teardown_virtqueues(ndev);
1711*1a86b377SEli Cohen 	ndev->setup = false;
1712*1a86b377SEli Cohen out:
1713*1a86b377SEli Cohen 	mutex_unlock(&ndev->reslock);
1714*1a86b377SEli Cohen }
1715*1a86b377SEli Cohen 
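/*
 * A zero status triggers a device reset: tear down the driver resources,
 * destroy the memory key and bump the generation count. Setting DRIVER_OK
 * builds the datapath; clearing DRIVER_OK is not expected. On setup
 * failure the device is marked FAILED.
 */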
1716*1a86b377SEli Cohen static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
1717*1a86b377SEli Cohen {
1718*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1719*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1720*1a86b377SEli Cohen 	int err;
1721*1a86b377SEli Cohen 
1722*1a86b377SEli Cohen 	print_status(mvdev, status, true);
1723*1a86b377SEli Cohen 	if (!status) {
1724*1a86b377SEli Cohen 		mlx5_vdpa_info(mvdev, "performing device reset\n");
1725*1a86b377SEli Cohen 		teardown_driver(ndev);
1726*1a86b377SEli Cohen 		mlx5_vdpa_destroy_mr(&ndev->mvdev);
1727*1a86b377SEli Cohen 		ndev->mvdev.status = 0;
1728*1a86b377SEli Cohen 		ndev->mvdev.mlx_features = 0;
1729*1a86b377SEli Cohen 		++mvdev->generation;
1730*1a86b377SEli Cohen 		return;
1731*1a86b377SEli Cohen 	}
1732*1a86b377SEli Cohen 
1733*1a86b377SEli Cohen 	if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
1734*1a86b377SEli Cohen 		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
1735*1a86b377SEli Cohen 			err = setup_driver(ndev);
1736*1a86b377SEli Cohen 			if (err) {
1737*1a86b377SEli Cohen 				mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
1738*1a86b377SEli Cohen 				goto err_setup;
1739*1a86b377SEli Cohen 			}
1740*1a86b377SEli Cohen 		} else {
1741*1a86b377SEli Cohen 			mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
1742*1a86b377SEli Cohen 			return;
1743*1a86b377SEli Cohen 		}
1744*1a86b377SEli Cohen 	}
1745*1a86b377SEli Cohen 
1746*1a86b377SEli Cohen 	ndev->mvdev.status = status;
1747*1a86b377SEli Cohen 	return;
1748*1a86b377SEli Cohen 
1749*1a86b377SEli Cohen err_setup:
1750*1a86b377SEli Cohen 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
1751*1a86b377SEli Cohen 	ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
1752*1a86b377SEli Cohen }
1753*1a86b377SEli Cohen 
1754*1a86b377SEli Cohen static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
1755*1a86b377SEli Cohen 				 unsigned int len)
1756*1a86b377SEli Cohen {
1757*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1758*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1759*1a86b377SEli Cohen 
1760*1a86b377SEli Cohen 	if (offset + len <= sizeof(struct virtio_net_config))
1761*1a86b377SEli Cohen 		memcpy(buf, (u8 *)&ndev->config + offset, len);
1762*1a86b377SEli Cohen }
1763*1a86b377SEli Cohen 
1764*1a86b377SEli Cohen static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
1765*1a86b377SEli Cohen 				 unsigned int len)
1766*1a86b377SEli Cohen {
1767*1a86b377SEli Cohen 	/* not supported */
1768*1a86b377SEli Cohen }
1769*1a86b377SEli Cohen 
1770*1a86b377SEli Cohen static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
1771*1a86b377SEli Cohen {
1772*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1773*1a86b377SEli Cohen 
1774*1a86b377SEli Cohen 	return mvdev->generation;
1775*1a86b377SEli Cohen }
1776*1a86b377SEli Cohen 
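/*
 * Program the device IOTLB. If mlx5_vdpa_handle_set_map() reports that the
 * mapping changed, rebuild the datapath around the new memory key.
 */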
1777*1a86b377SEli Cohen static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
1778*1a86b377SEli Cohen {
1779*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1780*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1781*1a86b377SEli Cohen 	bool change_map;
1782*1a86b377SEli Cohen 	int err;
1783*1a86b377SEli Cohen 
1784*1a86b377SEli Cohen 	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
1785*1a86b377SEli Cohen 	if (err) {
1786*1a86b377SEli Cohen 		mlx5_vdpa_warn(mvdev, "set map failed (%d)\n", err);
1787*1a86b377SEli Cohen 		return err;
1788*1a86b377SEli Cohen 	}
1789*1a86b377SEli Cohen 
1790*1a86b377SEli Cohen 	if (change_map)
1791*1a86b377SEli Cohen 		return mlx5_vdpa_change_map(ndev, iotlb);
1792*1a86b377SEli Cohen 
1793*1a86b377SEli Cohen 	return 0;
1794*1a86b377SEli Cohen }
1795*1a86b377SEli Cohen 
1796*1a86b377SEli Cohen static void mlx5_vdpa_free(struct vdpa_device *vdev)
1797*1a86b377SEli Cohen {
1798*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1799*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev;
1800*1a86b377SEli Cohen 
1801*1a86b377SEli Cohen 	ndev = to_mlx5_vdpa_ndev(mvdev);
1802*1a86b377SEli Cohen 
1803*1a86b377SEli Cohen 	free_resources(ndev);
1804*1a86b377SEli Cohen 	mlx5_vdpa_free_resources(&ndev->mvdev);
1805*1a86b377SEli Cohen 	mutex_destroy(&ndev->reslock);
1806*1a86b377SEli Cohen }
1807*1a86b377SEli Cohen 
1808*1a86b377SEli Cohen static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
1809*1a86b377SEli Cohen {
1810*1a86b377SEli Cohen 	struct vdpa_notification_area ret = {};
1811*1a86b377SEli Cohen 
1812*1a86b377SEli Cohen 	return ret;
1813*1a86b377SEli Cohen }
1814*1a86b377SEli Cohen 
1815*1a86b377SEli Cohen static int mlx5_get_vq_irq(struct vdpa_device *vdev, u16 idx)
1816*1a86b377SEli Cohen {
1817*1a86b377SEli Cohen 	return -EOPNOTSUPP;
1818*1a86b377SEli Cohen }
1819*1a86b377SEli Cohen 
1820*1a86b377SEli Cohen static const struct vdpa_config_ops mlx5_vdpa_ops = {
1821*1a86b377SEli Cohen 	.set_vq_address = mlx5_vdpa_set_vq_address,
1822*1a86b377SEli Cohen 	.set_vq_num = mlx5_vdpa_set_vq_num,
1823*1a86b377SEli Cohen 	.kick_vq = mlx5_vdpa_kick_vq,
1824*1a86b377SEli Cohen 	.set_vq_cb = mlx5_vdpa_set_vq_cb,
1825*1a86b377SEli Cohen 	.set_vq_ready = mlx5_vdpa_set_vq_ready,
1826*1a86b377SEli Cohen 	.get_vq_ready = mlx5_vdpa_get_vq_ready,
1827*1a86b377SEli Cohen 	.set_vq_state = mlx5_vdpa_set_vq_state,
1828*1a86b377SEli Cohen 	.get_vq_state = mlx5_vdpa_get_vq_state,
1829*1a86b377SEli Cohen 	.get_vq_notification = mlx5_get_vq_notification,
1830*1a86b377SEli Cohen 	.get_vq_irq = mlx5_get_vq_irq,
1831*1a86b377SEli Cohen 	.get_vq_align = mlx5_vdpa_get_vq_align,
1832*1a86b377SEli Cohen 	.get_features = mlx5_vdpa_get_features,
1833*1a86b377SEli Cohen 	.set_features = mlx5_vdpa_set_features,
1834*1a86b377SEli Cohen 	.set_config_cb = mlx5_vdpa_set_config_cb,
1835*1a86b377SEli Cohen 	.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
1836*1a86b377SEli Cohen 	.get_device_id = mlx5_vdpa_get_device_id,
1837*1a86b377SEli Cohen 	.get_vendor_id = mlx5_vdpa_get_vendor_id,
1838*1a86b377SEli Cohen 	.get_status = mlx5_vdpa_get_status,
1839*1a86b377SEli Cohen 	.set_status = mlx5_vdpa_set_status,
1840*1a86b377SEli Cohen 	.get_config = mlx5_vdpa_get_config,
1841*1a86b377SEli Cohen 	.set_config = mlx5_vdpa_set_config,
1842*1a86b377SEli Cohen 	.get_generation = mlx5_vdpa_get_generation,
1843*1a86b377SEli Cohen 	.set_map = mlx5_vdpa_set_map,
1844*1a86b377SEli Cohen 	.free = mlx5_vdpa_free,
1845*1a86b377SEli Cohen };
1846*1a86b377SEli Cohen 
1847*1a86b377SEli Cohen static int alloc_resources(struct mlx5_vdpa_net *ndev)
1848*1a86b377SEli Cohen {
1849*1a86b377SEli Cohen 	struct mlx5_vdpa_net_resources *res = &ndev->res;
1850*1a86b377SEli Cohen 	int err;
1851*1a86b377SEli Cohen 
1852*1a86b377SEli Cohen 	if (res->valid) {
1853*1a86b377SEli Cohen 		mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
1854*1a86b377SEli Cohen 		return -EEXIST;
1855*1a86b377SEli Cohen 	}
1856*1a86b377SEli Cohen 
1857*1a86b377SEli Cohen 	err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
1858*1a86b377SEli Cohen 	if (err)
1859*1a86b377SEli Cohen 		return err;
1860*1a86b377SEli Cohen 
1861*1a86b377SEli Cohen 	err = create_tis(ndev);
1862*1a86b377SEli Cohen 	if (err)
1863*1a86b377SEli Cohen 		goto err_tis;
1864*1a86b377SEli Cohen 
1865*1a86b377SEli Cohen 	res->valid = true;
1866*1a86b377SEli Cohen 
1867*1a86b377SEli Cohen 	return 0;
1868*1a86b377SEli Cohen 
1869*1a86b377SEli Cohen err_tis:
1870*1a86b377SEli Cohen 	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
1871*1a86b377SEli Cohen 	return err;
1872*1a86b377SEli Cohen }
1873*1a86b377SEli Cohen 
1874*1a86b377SEli Cohen static void free_resources(struct mlx5_vdpa_net *ndev)
1875*1a86b377SEli Cohen {
1876*1a86b377SEli Cohen 	struct mlx5_vdpa_net_resources *res = &ndev->res;
1877*1a86b377SEli Cohen 
1878*1a86b377SEli Cohen 	if (!res->valid)
1879*1a86b377SEli Cohen 		return;
1880*1a86b377SEli Cohen 
1881*1a86b377SEli Cohen 	destroy_tis(ndev);
1882*1a86b377SEli Cohen 	mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
1883*1a86b377SEli Cohen 	res->valid = false;
1884*1a86b377SEli Cohen }
1885*1a86b377SEli Cohen 
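/*
 * Initialize the virtqueue descriptors: the first 2 * max_qps entries are
 * data virtqueues whose firmware-side QP is marked as such; the remaining
 * entries only get their index and back-pointer set.
 */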
1886*1a86b377SEli Cohen static void init_mvqs(struct mlx5_vdpa_net *ndev)
1887*1a86b377SEli Cohen {
1888*1a86b377SEli Cohen 	struct mlx5_vdpa_virtqueue *mvq;
1889*1a86b377SEli Cohen 	int i;
1890*1a86b377SEli Cohen 
1891*1a86b377SEli Cohen 	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
1892*1a86b377SEli Cohen 		mvq = &ndev->vqs[i];
1893*1a86b377SEli Cohen 		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
1894*1a86b377SEli Cohen 		mvq->index = i;
1895*1a86b377SEli Cohen 		mvq->ndev = ndev;
1896*1a86b377SEli Cohen 		mvq->fwqp.fw = true;
1897*1a86b377SEli Cohen 	}
1898*1a86b377SEli Cohen 	for (; i < ndev->mvdev.max_vqs; i++) {
1899*1a86b377SEli Cohen 		mvq = &ndev->vqs[i];
1900*1a86b377SEli Cohen 		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
1901*1a86b377SEli Cohen 		mvq->index = i;
1902*1a86b377SEli Cohen 		mvq->ndev = ndev;
1903*1a86b377SEli Cohen 	}
1904*1a86b377SEli Cohen }
1905*1a86b377SEli Cohen 
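/*
 * Allocate and register a vdpa net device on top of an mlx5 core device:
 * query the MTU and MAC address for the config space, allocate the common
 * and net-specific resources and register with the vdpa bus.
 */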
1906*1a86b377SEli Cohen void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
1907*1a86b377SEli Cohen {
1908*1a86b377SEli Cohen 	struct virtio_net_config *config;
1909*1a86b377SEli Cohen 	struct mlx5_vdpa_dev *mvdev;
1910*1a86b377SEli Cohen 	struct mlx5_vdpa_net *ndev;
1911*1a86b377SEli Cohen 	u32 max_vqs;
1912*1a86b377SEli Cohen 	int err;
1913*1a86b377SEli Cohen 
1914*1a86b377SEli Cohen 	/* we save one virtqueue for the control virtqueue, should we require it */
1915*1a86b377SEli Cohen 	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
1916*1a86b377SEli Cohen 	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
1917*1a86b377SEli Cohen 
1918*1a86b377SEli Cohen 	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
1919*1a86b377SEli Cohen 				 2 * mlx5_vdpa_max_qps(max_vqs));
1920*1a86b377SEli Cohen 	if (IS_ERR(ndev))
1921*1a86b377SEli Cohen 		return ndev;
1922*1a86b377SEli Cohen 
1923*1a86b377SEli Cohen 	ndev->mvdev.max_vqs = max_vqs;
1924*1a86b377SEli Cohen 	mvdev = &ndev->mvdev;
1925*1a86b377SEli Cohen 	mvdev->mdev = mdev;
1926*1a86b377SEli Cohen 	init_mvqs(ndev);
1927*1a86b377SEli Cohen 	mutex_init(&ndev->reslock);
1928*1a86b377SEli Cohen 	config = &ndev->config;
1929*1a86b377SEli Cohen 	err = mlx5_query_nic_vport_mtu(mdev, &config->mtu);
1930*1a86b377SEli Cohen 	if (err)
1931*1a86b377SEli Cohen 		goto err_mtu;
1932*1a86b377SEli Cohen 
1933*1a86b377SEli Cohen 	err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
1934*1a86b377SEli Cohen 	if (err)
1935*1a86b377SEli Cohen 		goto err_mtu;
1936*1a86b377SEli Cohen 
1937*1a86b377SEli Cohen 	mvdev->vdev.dma_dev = mdev->device;
1938*1a86b377SEli Cohen 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
1939*1a86b377SEli Cohen 	if (err)
1940*1a86b377SEli Cohen 		goto err_mtu;
1941*1a86b377SEli Cohen 
1942*1a86b377SEli Cohen 	err = alloc_resources(ndev);
1943*1a86b377SEli Cohen 	if (err)
1944*1a86b377SEli Cohen 		goto err_res;
1945*1a86b377SEli Cohen 
1946*1a86b377SEli Cohen 	err = vdpa_register_device(&mvdev->vdev);
1947*1a86b377SEli Cohen 	if (err)
1948*1a86b377SEli Cohen 		goto err_reg;
1949*1a86b377SEli Cohen 
1950*1a86b377SEli Cohen 	return ndev;
1951*1a86b377SEli Cohen 
1952*1a86b377SEli Cohen err_reg:
1953*1a86b377SEli Cohen 	free_resources(ndev);
1954*1a86b377SEli Cohen err_res:
1955*1a86b377SEli Cohen 	mlx5_vdpa_free_resources(&ndev->mvdev);
1956*1a86b377SEli Cohen err_mtu:
1957*1a86b377SEli Cohen 	mutex_destroy(&ndev->reslock);
1958*1a86b377SEli Cohen 	put_device(&mvdev->vdev.dev);
1959*1a86b377SEli Cohen 	return ERR_PTR(err);
1960*1a86b377SEli Cohen }
1961*1a86b377SEli Cohen 
1962*1a86b377SEli Cohen void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
1963*1a86b377SEli Cohen {
1964*1a86b377SEli Cohen 	vdpa_unregister_device(&mvdev->vdev);
1965*1a86b377SEli Cohen }
1966