/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

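	/*
	 * Vector 0 is the config interrupt (or the shared INTx line when
	 * MSI-X is not in use, in which case msix_vectors is zero and the
	 * loop below does nothing); the rest are the per-queue vectors.
	 */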
	synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
	for (i = 1; i < vp_dev->msix_vectors; i++)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	irqreturn_t ret = IRQ_NONE;
	struct virtqueue *vq;

	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change?  Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

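/* Free any per-virtqueue MSI-X interrupts and delete all the virtqueues. */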
static void vp_remove_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->msix_vector_map) {
			int v = vp_dev->msix_vector_map[vq->index];

			if (v != VIRTIO_MSI_NO_VECTOR)
				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
					vq);
		}
		vp_dev->del_vq(vq);
	}
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
		return;

	vp_remove_vqs(vdev);

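	/*
	 * For MSI-X, release the per-vector state and disable the config
	 * vector here; the config/INTx interrupt and the vectors themselves
	 * are freed below for both interrupt modes.
	 */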
	if (vp_dev->pci_dev->msix_enabled) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);

		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		kfree(vp_dev->msix_affinity_masks);
		kfree(vp_dev->msix_names);
		kfree(vp_dev->msix_vector_map);
	}

	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
	pci_free_irq_vectors(vp_dev->pci_dev);
}

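/*
 * Set up the virtqueues using MSI-X: one vector for config changes plus,
 * if enough vectors are available, one vector per virtqueue with a
 * callback; otherwise all virtqueues share a single vector.
 */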
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	int i, err = -ENOMEM, allocated_vectors, nvectors;
	bool shared = false;
	u16 msix_vec;

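	/* One vector for config changes, plus one per VQ that has a callback. */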
	nvectors = 1;
	for (i = 0; i < nvqs; i++)
		if (callbacks[i])
			nvectors++;

	/* Try one vector per queue first. */
	err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
			PCI_IRQ_MSIX);
	if (err < 0) {
		/* Fallback to one vector for config, one shared for queues. */
		shared = true;
		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
				PCI_IRQ_MSIX);
		if (err < 0)
			return err;
	}
	if (err < 0)
		return err;

	vp_dev->msix_vectors = nvectors;
	vp_dev->msix_names = kmalloc_array(nvectors,
			sizeof(*vp_dev->msix_names), GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto out_free_irq_vectors;

	vp_dev->msix_affinity_masks = kcalloc(nvectors,
			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto out_free_msix_names;

	for (i = 0; i < nvectors; ++i) {
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				GFP_KERNEL))
			goto out_free_msix_affinity_masks;
	}

	/* Set the vector used for configuration */
	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
			0, vp_dev->msix_names[0], vp_dev);
	if (err)
		goto out_free_msix_affinity_masks;

	/* Verify we had enough resources to assign the vector */
	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto out_free_config_irq;
	}

	vp_dev->msix_vector_map = kmalloc_array(nvqs,
			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
	if (!vp_dev->msix_vector_map)
		goto out_disable_config_irq;

	allocated_vectors = 1; /* vector 0 is the config interrupt */
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (callbacks[i])
			msix_vec = allocated_vectors;
		else
			msix_vec = VIRTIO_MSI_NO_VECTOR;

		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
				msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_remove_vqs;
		}

		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
			continue;
		}

		snprintf(vp_dev->msix_names[i + 1],
			 sizeof(*vp_dev->msix_names), "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, IRQF_SHARED,
				  vp_dev->msix_names[i + 1], vqs[i]);
		if (err) {
			/* don't free this irq on error */
			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
			goto out_remove_vqs;
		}
		vp_dev->msix_vector_map[i] = msix_vec;

		/*
		 * Use a different vector for each queue if they are available,
		 * else share the same vector for all VQs.
		 */
		if (!shared)
			allocated_vectors++;
	}

	return 0;

out_remove_vqs:
	vp_remove_vqs(vdev);
	kfree(vp_dev->msix_vector_map);
out_disable_config_irq:
	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
out_free_config_irq:
	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
out_free_msix_affinity_masks:
	for (i = 0; i < nvectors; i++) {
		if (vp_dev->msix_affinity_masks[i])
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}
	kfree(vp_dev->msix_affinity_masks);
out_free_msix_names:
	kfree(vp_dev->msix_names);
out_free_irq_vectors:
	pci_free_irq_vectors(vp_dev->pci_dev);
	return err;
}

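/*
 * Set up the virtqueues using the legacy INTx interrupt, which is shared
 * between config changes and all virtqueues.
 */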
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vp_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
				VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_remove_vqs;
		}
	}

	return 0;

out_remove_vqs:
	vp_remove_vqs(vdev);
	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[],
		vq_callback_t *callbacks[],
		const char * const names[])
{
	int err;

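	/* Prefer MSI-X; fall back to the shared INTx interrupt if that fails. */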
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names);
	if (!err)
		return 0;
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->pci_dev->msix_enabled) {
		int vec = vp_dev->msix_vector_map[vq->index];
		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);

		if (cpu == -1)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

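	/*
	 * Prefer the modern (virtio 1.0) interface and fall back to legacy,
	 * unless legacy mode was explicitly requested via force_legacy.
	 */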
	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
	     virtio_pci_legacy_remove(vp_dev);
	else
	     virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	unregister_virtio_device(&vp_dev->vdev);

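	/* ioaddr is only set up by the legacy probe, so it tells us which
	 * backend to tear down. */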
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");