// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

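/**
 * pci_add_resource_offset - add a host bridge window to a resource list
 * @resources: resource list to which the window is added
 * @res: host bridge window (CPU addresses)
 * @offset: offset used to translate between the window's CPU and bus addresses
 *
 * Allocate a resource_entry for @res, record @offset in it, and append it
 * to @resources.  On allocation failure the window is logged and dropped.
 */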
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

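/**
 * pci_add_resource - add a host bridge window with no address translation
 * @resources: resource list to which the window is added
 * @res: host bridge window (identical CPU and bus addresses)
 */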
void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

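/**
 * pci_free_resource_list - free a list built by pci_add_resource*()
 * @resources: resource list to release
 */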
void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);

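/**
 * pci_bus_add_resource - add a resource to a bus
 * @bus: bus to which @res is added
 * @res: resource to add
 * @flags: flags stored with the bus resource entry
 *
 * Append @res to the list of extra resources of @bus, i.e. those beyond
 * the PCI_BRIDGE_RESOURCE_NUM slots in bus->resource[].
 */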
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}

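/**
 * pci_bus_resource_n - return the n-th resource of a bus
 * @bus: bus to query
 * @n: resource index
 *
 * Indices below PCI_BRIDGE_RESOURCE_NUM refer to bus->resource[]; higher
 * indices walk the bus->resources list.  Returns NULL when @n is out of
 * range.
 */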
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

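/**
 * pci_bus_remove_resource - remove a resource from a bus
 * @bus: bus to update
 * @res: resource to remove
 *
 * Clear the matching bus->resource[] slot, or unlink and free the matching
 * entry on the bus->resources list.
 */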
void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
{
	struct pci_bus_resource *bus_res, *tmp;
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		if (bus->resource[i] == res) {
			bus->resource[i] = NULL;
			return;
		}
	}

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		if (bus_res->res == res) {
			list_del(&bus_res->list);
			kfree(bus_res);
			return;
		}
	}
}

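/**
 * pci_bus_remove_resources - remove all resources from a bus
 * @bus: bus to update
 */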
void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

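/**
 * devm_request_pci_bus_resources - request host bridge windows as managed resources
 * @dev: device requesting the resources (typically the host bridge)
 * @resources: list of host bridge windows
 *
 * Request each I/O and memory window in @resources from ioport_resource or
 * iomem_resource as a device-managed resource of @dev.  Entries of other
 * types are skipped.  Returns 0 on success or a negative errno from the
 * first failed request.
 */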
int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}

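/*
 * Try each resource of @bus that matches the type of @res, clip it to
 * @region, and allocate @size bytes from the first window with enough
 * space.  Returns 0 on success, -ENOMEM if no window can hold @res.
 */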
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data,
		struct pci_bus_region *region)
{
	struct resource *r, avail;
	resource_size_t max;
	int ret;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource
		 * from a pre-fetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Don't bother if available space isn't large enough */
		if (size > max - min_used + 1)
			continue;

		/* OK, try it out. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum address to allocate, typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries for the device and starts its driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	struct device_node *dn = dev->dev.of_node;
	int retval;

	/*
	 * This cannot be done in pci_device_add() yet because resources
	 * are not assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	if (pci_is_bridge(dev))
		of_pci_make_dev_node(dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = !dn || of_device_is_available(dn);
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER)
		pci_warn(dev, "device attach failed (%d)\n", retval);

	pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for PCI devices on a bus
 * @bus: bus to check for new devices
 *
 * Start the driver for each PCI device on @bus and, recursively, on its
 * subordinate buses, adding the corresponding sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);

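/*
 * Iteratively walk @top and every bus below it, calling @cb for each device
 * found.  Stop as soon as @cb returns a non-zero value.  Takes pci_bus_sem
 * for reading unless @locked says the caller already holds it.
 */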
static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
			   void *userdata, bool locked)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	if (!locked)
		down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	if (!locked)
		up_read(&pci_bus_sem);
}

/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus.  Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time.  If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	__pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);

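/**
 * pci_walk_bus_locked - walk devices on/under bus, caller holding pci_bus_sem
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Same as pci_walk_bus(), but the caller must already hold pci_bus_sem.
 */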
void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
	lockdep_assert_held(&pci_bus_sem);

	__pci_walk_bus(top, cb, userdata, true);
}
EXPORT_SYMBOL_GPL(pci_walk_bus_locked);

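/**
 * pci_bus_get - take a reference on a bus
 * @bus: bus to reference, may be NULL
 *
 * Increment the reference count of the underlying device and return @bus.
 */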
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

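/**
 * pci_bus_put - release a reference taken with pci_bus_get()
 * @bus: bus to release, may be NULL
 */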
void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}