/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 *               to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

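/*
 * Look up the exported pci_dev matching the domain/bus/devfn requested by
 * the frontend.  Returns NULL if the device is not on this pdev's
 * passthrough list.
 */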
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}

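/*
 * Add a device to the passthrough list and publish it to the frontend
 * under its real domain/bus/devfn via publish_cb.
 */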
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

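/*
 * Remove @dev from the passthrough list and hand it back to pcistub.
 * When @lock is true the device lock is taken around
 * pcistub_put_pci_dev().
 */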
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

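/*
 * Allocate and initialise the per-pdev passthrough state: the list of
 * exported devices and the lock protecting it.
 */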
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

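/* Publish the root buses of the exported topology to the frontend. */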
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}

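/*
 * Tear down the passthrough state, returning every device to pcistub.
 * dev_data->lock is not taken here; no concurrent access is expected at
 * this point.
 */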
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		struct pci_dev *dev = dev_entry->dev;
		list_del(&dev_entry->list);
		device_lock(&dev->dev);
		pcistub_put_pci_dev(dev);
		device_unlock(&dev->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

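/*
 * Report a device's location to the frontend.  The passthrough backend
 * exposes the real topology, so the physical domain/bus/devfn is returned
 * unchanged.
 */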
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

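/* Backend operations used when xen-pciback runs in passthrough mode. */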
const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name           = "passthrough",
	.init           = __xen_pcibk_init_devices,
	.free           = __xen_pcibk_release_devices,
	.find           = __xen_pcibk_get_pcifront_dev,
	.publish        = __xen_pcibk_publish_pci_roots,
	.release        = __xen_pcibk_release_pci_dev,
	.add            = __xen_pcibk_add_pci_dev,
	.get            = __xen_pcibk_get_pci_dev,
};