xref: /openbmc/linux/drivers/dca/dca-core.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver provides the interface through which DCA (Direct Cache Access)
 * clients and providers find each other.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

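/*
 * DCA domains are keyed by PCI root complex: walk up from the requesting
 * device's bus until the topmost (root) bus is reached.
 */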
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

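/*
 * Identify Intel I/OAT ver 3.0 devices (the PCI_DEVICE_ID_INTEL_IOAT_TBG*
 * IDs); such providers get special treatment in dca_get_domain() below.
 */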
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

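/*
 * Tear down every registered provider: notify clients, detach the providers
 * from the (single remaining) domain under the lock, then remove their sysfs
 * entries after the lock has been dropped.
 */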
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

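/*
 * Look up, or create, the domain for a device's root complex.  If an I/OAT
 * ver 3.0 provider shows up while other root-complex domains already exist,
 * no new domain is allocated; dca_providers_blocked is set instead and
 * register_dca_provider() will then tear all providers down.
 */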
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
			dca_providers_blocked = 1;
		} else {
			domain = dca_allocate_domain(rc);
			if (domain)
				list_add(&domain->node, &dca_domains);
		}
	}

	return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
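
/*
 * Illustrative client usage (a sketch, not taken from a real driver): a
 * hypothetical client calls dca_add_requester() for its device and balances
 * it with dca_remove_requester() on teardown:
 *
 *	err = dca_add_requester(&pdev->dev);
 *	if (!err)
 *		priv->dca_enabled = true;
 *	...
 *	if (priv->dca_enabled)
 *		dca_remove_requester(&pdev->dev);
 *
 * "priv" and "dca_enabled" are placeholders.  dca_add_requester() returns
 * -EEXIST if the device was already added and -ENODEV if no provider covers
 * the device's root complex.
 */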

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
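
/*
 * Illustrative tag lookup (a sketch): the tag is fetched for the CPU that
 * will consume the data, with preemption disabled around the query (this is
 * what the "as returned by get_cpu()" wording above implies):
 *
 *	cpu = get_cpu();
 *	tag = dca3_get_tag(&pdev->dev, cpu);
 *	put_cpu();
 *
 * How the returned tag is then programmed into the client hardware is
 * device specific and outside the scope of this core.
 */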

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain;

	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		if (dca_providers_blocked) {
			spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
		} else {
			spin_unlock_irqrestore(&dca_lock, flags);
		}
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
	spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	spin_lock_irqsave(&dca_lock, flags);

	/* the domains may already have been torn down (and the provider's
	 * sysfs entry removed) by unregister_dca_providers() */
	if (list_empty(&dca_domains)) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
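
/*
 * Illustrative provider usage (a sketch assuming a hypothetical "foo" DMA
 * driver): a provider fills in a struct dca_ops, allocates and registers
 * itself on probe, and unregisters and frees itself on remove:
 *
 *	static struct dca_ops foo_dca_ops = {
 *		.add_requester		= foo_dca_add_requester,
 *		.remove_requester	= foo_dca_remove_requester,
 *		.get_tag		= foo_dca_get_tag,
 *		.dev_managed		= foo_dca_dev_managed,
 *	};
 *
 *	dca = alloc_dca_provider(&foo_dca_ops, sizeof(struct foo_dca_priv));
 *	if (!dca)
 *		return -ENOMEM;
 *	err = register_dca_provider(dca, &pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *	...
 *	unregister_dca_provider(dca, &pdev->dev);
 *	free_dca_provider(dca);
 *
 * The foo_* callbacks and foo_dca_priv are placeholders; a real provider
 * (e.g. the ioatdma driver) supplies its own.
 */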

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block to add to the dca provider chain
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block to remove from the dca provider chain
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
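
/*
 * Illustrative notifier usage (a sketch): a client that wants to react to
 * providers coming and going registers a standard notifier_block; the
 * callback's event argument is DCA_PROVIDER_ADD or DCA_PROVIDER_REMOVE:
 *
 *	static int foo_dca_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		switch (event) {
 *		case DCA_PROVIDER_ADD:
 *			(try dca_add_requester() for our device)
 *			break;
 *		case DCA_PROVIDER_REMOVE:
 *			(drop the requester, stop using DCA tags)
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct notifier_block foo_dca_nb = {
 *		.notifier_call = foo_dca_notify,
 *	};
 *
 *	dca_register_notify(&foo_dca_nb);
 *	...
 *	dca_unregister_notify(&foo_dca_nb);
 *
 * foo_dca_notify and foo_dca_nb are hypothetical names.
 */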

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);