1*64b9f64fSJason Wang // SPDX-License-Identifier: GPL-2.0-only
2*64b9f64fSJason Wang /*
3*64b9f64fSJason Wang  * vDPA bridge driver for modern virtio-pci device
4*64b9f64fSJason Wang  *
5*64b9f64fSJason Wang  * Copyright (c) 2020, Red Hat Inc. All rights reserved.
6*64b9f64fSJason Wang  * Author: Jason Wang <jasowang@redhat.com>
7*64b9f64fSJason Wang  *
8*64b9f64fSJason Wang  * Based on virtio_pci_modern.c.
9*64b9f64fSJason Wang  */
10*64b9f64fSJason Wang 
11*64b9f64fSJason Wang #include <linux/interrupt.h>
12*64b9f64fSJason Wang #include <linux/module.h>
13*64b9f64fSJason Wang #include <linux/pci.h>
14*64b9f64fSJason Wang #include <linux/vdpa.h>
15*64b9f64fSJason Wang #include <linux/virtio.h>
16*64b9f64fSJason Wang #include <linux/virtio_config.h>
17*64b9f64fSJason Wang #include <linux/virtio_ring.h>
18*64b9f64fSJason Wang #include <linux/virtio_pci.h>
19*64b9f64fSJason Wang #include <linux/virtio_pci_modern.h>
20*64b9f64fSJason Wang 
21*64b9f64fSJason Wang #define VP_VDPA_QUEUE_MAX 256
22*64b9f64fSJason Wang #define VP_VDPA_DRIVER_NAME "vp_vdpa"
23*64b9f64fSJason Wang #define VP_VDPA_NAME_SIZE 256
24*64b9f64fSJason Wang 
/* Per-virtqueue state kept by the bridge. */
struct vp_vring {
	void __iomem *notify;			/* doorbell: queue notify address in a device BAR */
	char msix_name[VP_VDPA_NAME_SIZE];	/* irq name shown in /proc/interrupts */
	struct vdpa_callback cb;		/* driver callback fired from the vq irq handler */
	int irq;				/* Linux irq number, or VIRTIO_MSI_NO_VECTOR if unset */
};
31*64b9f64fSJason Wang 
/* Bridge device: a vDPA device backed by a modern virtio-pci function. */
struct vp_vdpa {
	struct vdpa_device vdpa;		/* must be embedded; recovered via container_of() */
	struct virtio_pci_modern_device mdev;	/* modern virtio-pci transport state */
	struct vp_vring *vring;			/* array of vp_vdpa->queues entries */
	struct vdpa_callback config_cb;		/* driver config-change callback */
	char msix_name[VP_VDPA_NAME_SIZE];	/* config irq name */
	int config_irq;				/* config irq, or VIRTIO_MSI_NO_VECTOR */
	int queues;				/* number of virtqueues reported by the device */
	int vectors;				/* number of allocated MSI-X vectors (0 if none) */
};
42*64b9f64fSJason Wang 
/* Recover our wrapper from the embedded vdpa_device the core hands us. */
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}
47*64b9f64fSJason Wang 
48*64b9f64fSJason Wang static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
49*64b9f64fSJason Wang {
50*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
51*64b9f64fSJason Wang 
52*64b9f64fSJason Wang 	return &vp_vdpa->mdev;
53*64b9f64fSJason Wang }
54*64b9f64fSJason Wang 
55*64b9f64fSJason Wang static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
56*64b9f64fSJason Wang {
57*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
58*64b9f64fSJason Wang 
59*64b9f64fSJason Wang 	return vp_modern_get_features(mdev);
60*64b9f64fSJason Wang }
61*64b9f64fSJason Wang 
62*64b9f64fSJason Wang static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
63*64b9f64fSJason Wang {
64*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
65*64b9f64fSJason Wang 
66*64b9f64fSJason Wang 	vp_modern_set_features(mdev, features);
67*64b9f64fSJason Wang 
68*64b9f64fSJason Wang 	return 0;
69*64b9f64fSJason Wang }
70*64b9f64fSJason Wang 
71*64b9f64fSJason Wang static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
72*64b9f64fSJason Wang {
73*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
74*64b9f64fSJason Wang 
75*64b9f64fSJason Wang 	return vp_modern_get_status(mdev);
76*64b9f64fSJason Wang }
77*64b9f64fSJason Wang 
/*
 * Tear down all interrupts set up by vp_vdpa_request_irq().
 *
 * For each vq and for the config interrupt: detach the vector from the
 * device first (so it stops raising the interrupt), then free the irq,
 * then mark the slot unused.  Finally release the MSI-X vectors.
 * Safe to call on a partially set-up state: every step is guarded by
 * the VIRTIO_MSI_NO_VECTOR / vectors==0 sentinels.
 */
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			/* Detach the vector from the queue before freeing. */
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}
104*64b9f64fSJason Wang 
105*64b9f64fSJason Wang static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
106*64b9f64fSJason Wang {
107*64b9f64fSJason Wang 	struct vp_vring *vring = arg;
108*64b9f64fSJason Wang 
109*64b9f64fSJason Wang 	if (vring->cb.callback)
110*64b9f64fSJason Wang 		return vring->cb.callback(vring->cb.private);
111*64b9f64fSJason Wang 
112*64b9f64fSJason Wang 	return IRQ_HANDLED;
113*64b9f64fSJason Wang }
114*64b9f64fSJason Wang 
115*64b9f64fSJason Wang static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
116*64b9f64fSJason Wang {
117*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = arg;
118*64b9f64fSJason Wang 
119*64b9f64fSJason Wang 	if (vp_vdpa->config_cb.callback)
120*64b9f64fSJason Wang 		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);
121*64b9f64fSJason Wang 
122*64b9f64fSJason Wang 	return IRQ_HANDLED;
123*64b9f64fSJason Wang }
124*64b9f64fSJason Wang 
125*64b9f64fSJason Wang static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
126*64b9f64fSJason Wang {
127*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
128*64b9f64fSJason Wang 	struct pci_dev *pdev = mdev->pci_dev;
129*64b9f64fSJason Wang 	int i, ret, irq;
130*64b9f64fSJason Wang 	int queues = vp_vdpa->queues;
131*64b9f64fSJason Wang 	int vectors = queues + 1;
132*64b9f64fSJason Wang 
133*64b9f64fSJason Wang 	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
134*64b9f64fSJason Wang 	if (ret != vectors) {
135*64b9f64fSJason Wang 		dev_err(&pdev->dev,
136*64b9f64fSJason Wang 			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
137*64b9f64fSJason Wang 			vectors, ret);
138*64b9f64fSJason Wang 		return ret;
139*64b9f64fSJason Wang 	}
140*64b9f64fSJason Wang 
141*64b9f64fSJason Wang 	vp_vdpa->vectors = vectors;
142*64b9f64fSJason Wang 
143*64b9f64fSJason Wang 	for (i = 0; i < queues; i++) {
144*64b9f64fSJason Wang 		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
145*64b9f64fSJason Wang 			"vp-vdpa[%s]-%d\n", pci_name(pdev), i);
146*64b9f64fSJason Wang 		irq = pci_irq_vector(pdev, i);
147*64b9f64fSJason Wang 		ret = devm_request_irq(&pdev->dev, irq,
148*64b9f64fSJason Wang 				       vp_vdpa_vq_handler,
149*64b9f64fSJason Wang 				       0, vp_vdpa->vring[i].msix_name,
150*64b9f64fSJason Wang 				       &vp_vdpa->vring[i]);
151*64b9f64fSJason Wang 		if (ret) {
152*64b9f64fSJason Wang 			dev_err(&pdev->dev,
153*64b9f64fSJason Wang 				"vp_vdpa: fail to request irq for vq %d\n", i);
154*64b9f64fSJason Wang 			goto err;
155*64b9f64fSJason Wang 		}
156*64b9f64fSJason Wang 		vp_modern_queue_vector(mdev, i, i);
157*64b9f64fSJason Wang 		vp_vdpa->vring[i].irq = irq;
158*64b9f64fSJason Wang 	}
159*64b9f64fSJason Wang 
160*64b9f64fSJason Wang 	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
161*64b9f64fSJason Wang 		 pci_name(pdev));
162*64b9f64fSJason Wang 	irq = pci_irq_vector(pdev, queues);
163*64b9f64fSJason Wang 	ret = devm_request_irq(&pdev->dev, irq,	vp_vdpa_config_handler, 0,
164*64b9f64fSJason Wang 			       vp_vdpa->msix_name, vp_vdpa);
165*64b9f64fSJason Wang 	if (ret) {
166*64b9f64fSJason Wang 		dev_err(&pdev->dev,
167*64b9f64fSJason Wang 			"vp_vdpa: fail to request irq for vq %d\n", i);
168*64b9f64fSJason Wang 			goto err;
169*64b9f64fSJason Wang 	}
170*64b9f64fSJason Wang 	vp_modern_config_vector(mdev, queues);
171*64b9f64fSJason Wang 	vp_vdpa->config_irq = irq;
172*64b9f64fSJason Wang 
173*64b9f64fSJason Wang 	return 0;
174*64b9f64fSJason Wang err:
175*64b9f64fSJason Wang 	vp_vdpa_free_irq(vp_vdpa);
176*64b9f64fSJason Wang 	return ret;
177*64b9f64fSJason Wang }
178*64b9f64fSJason Wang 
179*64b9f64fSJason Wang static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
180*64b9f64fSJason Wang {
181*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
182*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
183*64b9f64fSJason Wang 	u8 s = vp_vdpa_get_status(vdpa);
184*64b9f64fSJason Wang 
185*64b9f64fSJason Wang 	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
186*64b9f64fSJason Wang 	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
187*64b9f64fSJason Wang 		vp_vdpa_request_irq(vp_vdpa);
188*64b9f64fSJason Wang 	}
189*64b9f64fSJason Wang 
190*64b9f64fSJason Wang 	vp_modern_set_status(mdev, status);
191*64b9f64fSJason Wang 
192*64b9f64fSJason Wang 	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
193*64b9f64fSJason Wang 	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
194*64b9f64fSJason Wang 		vp_vdpa_free_irq(vp_vdpa);
195*64b9f64fSJason Wang }
196*64b9f64fSJason Wang 
/* Maximum queue size advertised to userspace/drivers (fixed at 256). */
static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}
201*64b9f64fSJason Wang 
static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}
211*64b9f64fSJason Wang 
static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}
221*64b9f64fSJason Wang 
222*64b9f64fSJason Wang static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
223*64b9f64fSJason Wang 			      struct vdpa_callback *cb)
224*64b9f64fSJason Wang {
225*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
226*64b9f64fSJason Wang 
227*64b9f64fSJason Wang 	vp_vdpa->vring[qid].cb = *cb;
228*64b9f64fSJason Wang }
229*64b9f64fSJason Wang 
230*64b9f64fSJason Wang static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
231*64b9f64fSJason Wang 				 u16 qid, bool ready)
232*64b9f64fSJason Wang {
233*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
234*64b9f64fSJason Wang 
235*64b9f64fSJason Wang 	vp_modern_set_queue_enable(mdev, qid, ready);
236*64b9f64fSJason Wang }
237*64b9f64fSJason Wang 
238*64b9f64fSJason Wang static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
239*64b9f64fSJason Wang {
240*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
241*64b9f64fSJason Wang 
242*64b9f64fSJason Wang 	return vp_modern_get_queue_enable(mdev, qid);
243*64b9f64fSJason Wang }
244*64b9f64fSJason Wang 
245*64b9f64fSJason Wang static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
246*64b9f64fSJason Wang 			       u32 num)
247*64b9f64fSJason Wang {
248*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
249*64b9f64fSJason Wang 
250*64b9f64fSJason Wang 	vp_modern_set_queue_size(mdev, qid, num);
251*64b9f64fSJason Wang }
252*64b9f64fSJason Wang 
253*64b9f64fSJason Wang static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
254*64b9f64fSJason Wang 				  u64 desc_area, u64 driver_area,
255*64b9f64fSJason Wang 				  u64 device_area)
256*64b9f64fSJason Wang {
257*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
258*64b9f64fSJason Wang 
259*64b9f64fSJason Wang 	vp_modern_queue_address(mdev, qid, desc_area,
260*64b9f64fSJason Wang 				driver_area, device_area);
261*64b9f64fSJason Wang 
262*64b9f64fSJason Wang 	return 0;
263*64b9f64fSJason Wang }
264*64b9f64fSJason Wang 
265*64b9f64fSJason Wang static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
266*64b9f64fSJason Wang {
267*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
268*64b9f64fSJason Wang 
269*64b9f64fSJason Wang 	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
270*64b9f64fSJason Wang }
271*64b9f64fSJason Wang 
272*64b9f64fSJason Wang static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
273*64b9f64fSJason Wang {
274*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
275*64b9f64fSJason Wang 
276*64b9f64fSJason Wang 	return vp_modern_generation(mdev);
277*64b9f64fSJason Wang }
278*64b9f64fSJason Wang 
279*64b9f64fSJason Wang static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
280*64b9f64fSJason Wang {
281*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
282*64b9f64fSJason Wang 
283*64b9f64fSJason Wang 	return mdev->id.device;
284*64b9f64fSJason Wang }
285*64b9f64fSJason Wang 
286*64b9f64fSJason Wang static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
287*64b9f64fSJason Wang {
288*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
289*64b9f64fSJason Wang 
290*64b9f64fSJason Wang 	return mdev->id.vendor;
291*64b9f64fSJason Wang }
292*64b9f64fSJason Wang 
/* Required alignment for vring allocations. */
static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}
297*64b9f64fSJason Wang 
/*
 * Copy 'len' bytes of device config space starting at 'offset' into 'buf'.
 *
 * The read is bracketed by two reads of the config generation counter and
 * retried until the counter is stable, so the caller never observes a torn
 * snapshot while the device is updating its config space concurrently.
 */
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		/* Config space only guarantees byte-wide accesses. */
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}
317*64b9f64fSJason Wang 
318*64b9f64fSJason Wang static void vp_vdpa_set_config(struct vdpa_device *vdpa,
319*64b9f64fSJason Wang 			       unsigned int offset, const void *buf,
320*64b9f64fSJason Wang 			       unsigned int len)
321*64b9f64fSJason Wang {
322*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
323*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
324*64b9f64fSJason Wang 	const u8 *p = buf;
325*64b9f64fSJason Wang 	int i;
326*64b9f64fSJason Wang 
327*64b9f64fSJason Wang 	for (i = 0; i < len; i++)
328*64b9f64fSJason Wang 		vp_iowrite8(*p++, mdev->device + offset + i);
329*64b9f64fSJason Wang }
330*64b9f64fSJason Wang 
331*64b9f64fSJason Wang static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
332*64b9f64fSJason Wang 				  struct vdpa_callback *cb)
333*64b9f64fSJason Wang {
334*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
335*64b9f64fSJason Wang 
336*64b9f64fSJason Wang 	vp_vdpa->config_cb = *cb;
337*64b9f64fSJason Wang }
338*64b9f64fSJason Wang 
/* vDPA config ops, all backed by the virtio-pci modern layout helpers. */
static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_features	= vp_vdpa_get_features,
	.set_features	= vp_vdpa_set_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb  = vp_vdpa_set_config_cb,
};
361*64b9f64fSJason Wang 
/* devm action: release the MSI-X vectors of the bound PCI device. */
static void vp_vdpa_free_irq_vectors(void *data)
{
	struct pci_dev *pdev = data;

	pci_free_irq_vectors(pdev);
}
366*64b9f64fSJason Wang 
367*64b9f64fSJason Wang static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
368*64b9f64fSJason Wang {
369*64b9f64fSJason Wang 	struct virtio_pci_modern_device *mdev;
370*64b9f64fSJason Wang 	struct device *dev = &pdev->dev;
371*64b9f64fSJason Wang 	struct vp_vdpa *vp_vdpa;
372*64b9f64fSJason Wang 	u16 notify_off;
373*64b9f64fSJason Wang 	int ret, i;
374*64b9f64fSJason Wang 
375*64b9f64fSJason Wang 	ret = pcim_enable_device(pdev);
376*64b9f64fSJason Wang 	if (ret)
377*64b9f64fSJason Wang 		return ret;
378*64b9f64fSJason Wang 
379*64b9f64fSJason Wang 	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
380*64b9f64fSJason Wang 				    dev, &vp_vdpa_ops, NULL);
381*64b9f64fSJason Wang 	if (vp_vdpa == NULL) {
382*64b9f64fSJason Wang 		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
383*64b9f64fSJason Wang 		return -ENOMEM;
384*64b9f64fSJason Wang 	}
385*64b9f64fSJason Wang 
386*64b9f64fSJason Wang 	mdev = &vp_vdpa->mdev;
387*64b9f64fSJason Wang 	mdev->pci_dev = pdev;
388*64b9f64fSJason Wang 
389*64b9f64fSJason Wang 	ret = vp_modern_probe(mdev);
390*64b9f64fSJason Wang 	if (ret) {
391*64b9f64fSJason Wang 		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
392*64b9f64fSJason Wang 		goto err;
393*64b9f64fSJason Wang 	}
394*64b9f64fSJason Wang 
395*64b9f64fSJason Wang 	pci_set_master(pdev);
396*64b9f64fSJason Wang 	pci_set_drvdata(pdev, vp_vdpa);
397*64b9f64fSJason Wang 
398*64b9f64fSJason Wang 	vp_vdpa->vdpa.dma_dev = &pdev->dev;
399*64b9f64fSJason Wang 	vp_vdpa->queues = vp_modern_get_num_queues(mdev);
400*64b9f64fSJason Wang 
401*64b9f64fSJason Wang 	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
402*64b9f64fSJason Wang 	if (ret) {
403*64b9f64fSJason Wang 		dev_err(&pdev->dev,
404*64b9f64fSJason Wang 			"Failed for adding devres for freeing irq vectors\n");
405*64b9f64fSJason Wang 		goto err;
406*64b9f64fSJason Wang 	}
407*64b9f64fSJason Wang 
408*64b9f64fSJason Wang 	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
409*64b9f64fSJason Wang 				      sizeof(*vp_vdpa->vring),
410*64b9f64fSJason Wang 				      GFP_KERNEL);
411*64b9f64fSJason Wang 	if (!vp_vdpa->vring) {
412*64b9f64fSJason Wang 		ret = -ENOMEM;
413*64b9f64fSJason Wang 		dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
414*64b9f64fSJason Wang 		goto err;
415*64b9f64fSJason Wang 	}
416*64b9f64fSJason Wang 
417*64b9f64fSJason Wang 	for (i = 0; i < vp_vdpa->queues; i++) {
418*64b9f64fSJason Wang 		notify_off = vp_modern_get_queue_notify_off(mdev, i);
419*64b9f64fSJason Wang 		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
420*64b9f64fSJason Wang 		vp_vdpa->vring[i].notify = mdev->notify_base +
421*64b9f64fSJason Wang 			notify_off * mdev->notify_offset_multiplier;
422*64b9f64fSJason Wang 	}
423*64b9f64fSJason Wang 	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
424*64b9f64fSJason Wang 
425*64b9f64fSJason Wang 	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
426*64b9f64fSJason Wang 	if (ret) {
427*64b9f64fSJason Wang 		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
428*64b9f64fSJason Wang 		goto err;
429*64b9f64fSJason Wang 	}
430*64b9f64fSJason Wang 
431*64b9f64fSJason Wang 	return 0;
432*64b9f64fSJason Wang 
433*64b9f64fSJason Wang err:
434*64b9f64fSJason Wang 	put_device(&vp_vdpa->vdpa.dev);
435*64b9f64fSJason Wang 	return ret;
436*64b9f64fSJason Wang }
437*64b9f64fSJason Wang 
/*
 * PCI remove: unregister from the vDPA bus first (stops new users),
 * then tear down the modern virtio-pci transport state.
 */
static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}
445*64b9f64fSJason Wang 
/* No static id table: devices are bound dynamically (e.g. via sysfs new_id). */
static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};
452*64b9f64fSJason Wang 
/* Standard PCI driver module boilerplate (registers on load, unregisters on unload). */
module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
459