1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (c) 2014 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
5 */
6
7 #include <common.h>
8 #include <dm.h>
9 #include <errno.h>
10 #include <pci.h>
11 #include <asm/io.h>
12 #include <dm/device-internal.h>
13 #include <dm/lists.h>
14 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
15 #include <asm/fsp/fsp_support.h>
16 #endif
17 #include "pci_internal.h"
18
19 DECLARE_GLOBAL_DATA_PTR;
20
21 int pci_get_bus(int busnum, struct udevice **busp)
22 {
23 int ret;
24
25 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
26
27 /* Since buses may not be numbered yet, try a little harder with bus 0 */
28 if (ret == -ENODEV) {
29 ret = uclass_first_device_err(UCLASS_PCI, busp);
30 if (ret)
31 return ret;
32 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
33 }
34
35 return ret;
36 }
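/*
 * Illustrative usage sketch (not part of the driver): callers typically
 * resolve a bus number to its udevice before issuing config cycles. The
 * error handling style here is an assumption for illustration only.
 *
 *	struct udevice *bus;
 *	int ret;
 *
 *	ret = pci_get_bus(0, &bus);
 *	if (ret) {
 *		printf("No PCI bus 0: %d\n", ret);
 *		return ret;
 *	}
 */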
37
38 struct udevice *pci_get_controller(struct udevice *dev)
39 {
40 while (device_is_on_pci_bus(dev))
41 dev = dev->parent;
42
43 return dev;
44 }
45
46 pci_dev_t dm_pci_get_bdf(struct udevice *dev)
47 {
48 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
49 struct udevice *bus = dev->parent;
50
51 return PCI_ADD_BUS(bus->seq, pplat->devfn);
52 }
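/*
 * Example (sketch): a bdf returned by dm_pci_get_bdf() can be decoded with
 * the PCI_BUS()/PCI_DEV()/PCI_FUNC() macros used elsewhere in this file,
 * e.g. for debug output:
 *
 *	pci_dev_t bdf = dm_pci_get_bdf(dev);
 *
 *	debug("device at %x:%x.%x\n", PCI_BUS(bdf), PCI_DEV(bdf),
 *	      PCI_FUNC(bdf));
 */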
53
54 /**
55 * pci_get_bus_max() - returns the bus number of the last active bus
56 *
57 * @return last bus number, or -1 if no active buses
58 */
59 static int pci_get_bus_max(void)
60 {
61 struct udevice *bus;
62 struct uclass *uc;
63 int ret = -1;
64
65 ret = uclass_get(UCLASS_PCI, &uc);
66 uclass_foreach_dev(bus, uc) {
67 if (bus->seq > ret)
68 ret = bus->seq;
69 }
70
71 debug("%s: ret=%d\n", __func__, ret);
72
73 return ret;
74 }
75
76 int pci_last_busno(void)
77 {
78 return pci_get_bus_max();
79 }
80
81 int pci_get_ff(enum pci_size_t size)
82 {
83 switch (size) {
84 case PCI_SIZE_8:
85 return 0xff;
86 case PCI_SIZE_16:
87 return 0xffff;
88 default:
89 return 0xffffffff;
90 }
91 }
92
93 static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
94 ofnode *rnode)
95 {
96 struct fdt_pci_addr addr;
97 ofnode node;
98 int ret;
99
100 dev_for_each_subnode(node, bus) {
101 ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
102 &addr);
103 if (ret)
104 continue;
105
106 if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
107 continue;
108
109 *rnode = node;
110 break;
111 }
112 }
113
114 int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn,
115 struct udevice **devp)
116 {
117 struct udevice *dev;
118
119 for (device_find_first_child(bus, &dev);
120 dev;
121 device_find_next_child(&dev)) {
122 struct pci_child_platdata *pplat;
123
124 pplat = dev_get_parent_platdata(dev);
125 if (pplat && pplat->devfn == find_devfn) {
126 *devp = dev;
127 return 0;
128 }
129 }
130
131 return -ENODEV;
132 }
133
134 int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
135 {
136 struct udevice *bus;
137 int ret;
138
139 ret = pci_get_bus(PCI_BUS(bdf), &bus);
140 if (ret)
141 return ret;
142 return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
143 }
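/*
 * Example (sketch): looking up the device at 00:1f.3 by BDF. PCI_BDF() is
 * the same macro pci_bind_bus_devices() uses below; the address chosen
 * here is purely illustrative.
 *
 *	struct udevice *dev;
 *
 *	if (!dm_pci_bus_find_bdf(PCI_BDF(0, 0x1f, 3), &dev))
 *		printf("found %s\n", dev->name);
 */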
144
145 static int pci_device_matches_ids(struct udevice *dev,
146 struct pci_device_id *ids)
147 {
148 struct pci_child_platdata *pplat;
149 int i;
150
151 pplat = dev_get_parent_platdata(dev);
152 if (!pplat)
153 return -EINVAL;
154 for (i = 0; ids[i].vendor != 0; i++) {
155 if (pplat->vendor == ids[i].vendor &&
156 pplat->device == ids[i].device)
157 return i;
158 }
159
160 return -EINVAL;
161 }
162
163 int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
164 int *indexp, struct udevice **devp)
165 {
166 struct udevice *dev;
167
168 /* Scan all devices on this bus */
169 for (device_find_first_child(bus, &dev);
170 dev;
171 device_find_next_child(&dev)) {
172 if (pci_device_matches_ids(dev, ids) >= 0) {
173 if ((*indexp)-- <= 0) {
174 *devp = dev;
175 return 0;
176 }
177 }
178 }
179
180 return -ENODEV;
181 }
182
183 int pci_find_device_id(struct pci_device_id *ids, int index,
184 struct udevice **devp)
185 {
186 struct udevice *bus;
187
188 /* Scan all known buses */
189 for (uclass_first_device(UCLASS_PCI, &bus);
190 bus;
191 uclass_next_device(&bus)) {
192 if (!pci_bus_find_devices(bus, ids, &index, devp))
193 return 0;
194 }
195 *devp = NULL;
196
197 return -ENODEV;
198 }
199
200 static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
201 unsigned int device, int *indexp,
202 struct udevice **devp)
203 {
204 struct pci_child_platdata *pplat;
205 struct udevice *dev;
206
207 for (device_find_first_child(bus, &dev);
208 dev;
209 device_find_next_child(&dev)) {
210 pplat = dev_get_parent_platdata(dev);
211 if (pplat->vendor == vendor && pplat->device == device) {
212 if (!(*indexp)--) {
213 *devp = dev;
214 return 0;
215 }
216 }
217 }
218
219 return -ENODEV;
220 }
221
222 int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
223 struct udevice **devp)
224 {
225 struct udevice *bus;
226
227 /* Scan all known buses */
228 for (uclass_first_device(UCLASS_PCI, &bus);
229 bus;
230 uclass_next_device(&bus)) {
231 if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
232 return device_probe(*devp);
233 }
234 *devp = NULL;
235
236 return -ENODEV;
237 }
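/*
 * Example (sketch): finding and probing the first device with a given
 * vendor/device ID. The IDs used here are made up for illustration.
 *
 *	struct udevice *dev;
 *	int ret;
 *
 *	ret = dm_pci_find_device(0x8086, 0x1234, 0, &dev);
 *	if (!ret)
 *		printf("probed %s\n", dev->name);
 */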
238
239 int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
240 {
241 struct udevice *dev;
242
243 /* Scan all known buses */
244 for (pci_find_first_device(&dev);
245 dev;
246 pci_find_next_device(&dev)) {
247 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
248
249 if (pplat->class == find_class && !index--) {
250 *devp = dev;
251 return device_probe(*devp);
252 }
253 }
254 *devp = NULL;
255
256 return -ENODEV;
257 }
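/*
 * Example (sketch): finding the first VGA display controller. Note that
 * dm_pci_find_class() takes the full 24-bit class (class/subclass/prog-if),
 * which is why the 16-bit PCI_CLASS_DISPLAY_VGA value is shifted by 8,
 * matching the comparison against pplat->class above.
 *
 *	struct udevice *dev;
 *
 *	if (!dm_pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, 0, &dev))
 *		printf("VGA device: %s\n", dev->name);
 */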
258
259 int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
260 unsigned long value, enum pci_size_t size)
261 {
262 struct dm_pci_ops *ops;
263
264 ops = pci_get_ops(bus);
265 if (!ops->write_config)
266 return -ENOSYS;
267 return ops->write_config(bus, bdf, offset, value, size);
268 }
269
270 int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
271 u32 clr, u32 set)
272 {
273 ulong val;
274 int ret;
275
276 ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
277 if (ret)
278 return ret;
279 val &= ~clr;
280 val |= set;
281
282 return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
283 }
284
285 int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
286 enum pci_size_t size)
287 {
288 struct udevice *bus;
289 int ret;
290
291 ret = pci_get_bus(PCI_BUS(bdf), &bus);
292 if (ret)
293 return ret;
294
295 return pci_bus_write_config(bus, bdf, offset, value, size);
296 }
297
298 int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
299 enum pci_size_t size)
300 {
301 struct udevice *bus;
302
303 for (bus = dev; device_is_on_pci_bus(bus);)
304 bus = bus->parent;
305 return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
306 size);
307 }
308
309 int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
310 {
311 return pci_write_config(bdf, offset, value, PCI_SIZE_32);
312 }
313
314 int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
315 {
316 return pci_write_config(bdf, offset, value, PCI_SIZE_16);
317 }
318
319 int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
320 {
321 return pci_write_config(bdf, offset, value, PCI_SIZE_8);
322 }
323
324 int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
325 {
326 return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
327 }
328
329 int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
330 {
331 return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
332 }
333
334 int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
335 {
336 return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
337 }
338
339 int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
340 unsigned long *valuep, enum pci_size_t size)
341 {
342 struct dm_pci_ops *ops;
343
344 ops = pci_get_ops(bus);
345 if (!ops->read_config)
346 return -ENOSYS;
347 return ops->read_config(bus, bdf, offset, valuep, size);
348 }
349
350 int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
351 enum pci_size_t size)
352 {
353 struct udevice *bus;
354 int ret;
355
356 ret = pci_get_bus(PCI_BUS(bdf), &bus);
357 if (ret)
358 return ret;
359
360 return pci_bus_read_config(bus, bdf, offset, valuep, size);
361 }
362
363 int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep,
364 enum pci_size_t size)
365 {
366 struct udevice *bus;
367
368 for (bus = dev; device_is_on_pci_bus(bus);)
369 bus = bus->parent;
370 return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
371 size);
372 }
373
374 int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
375 {
376 unsigned long value;
377 int ret;
378
379 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
380 if (ret)
381 return ret;
382 *valuep = value;
383
384 return 0;
385 }
386
387 int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
388 {
389 unsigned long value;
390 int ret;
391
392 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
393 if (ret)
394 return ret;
395 *valuep = value;
396
397 return 0;
398 }
399
400 int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
401 {
402 unsigned long value;
403 int ret;
404
405 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
406 if (ret)
407 return ret;
408 *valuep = value;
409
410 return 0;
411 }
412
413 int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep)
414 {
415 unsigned long value;
416 int ret;
417
418 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
419 if (ret)
420 return ret;
421 *valuep = value;
422
423 return 0;
424 }
425
426 int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep)
427 {
428 unsigned long value;
429 int ret;
430
431 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
432 if (ret)
433 return ret;
434 *valuep = value;
435
436 return 0;
437 }
438
439 int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep)
440 {
441 unsigned long value;
442 int ret;
443
444 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
445 if (ret)
446 return ret;
447 *valuep = value;
448
449 return 0;
450 }
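/*
 * Example (sketch): the sized wrappers above are the normal way for a
 * driver to touch config space. This reads the vendor ID and enables
 * I/O-space decode; register and bit names come from <pci.h>.
 *
 *	u16 vendor, cmd;
 *
 *	dm_pci_read_config16(dev, PCI_VENDOR_ID, &vendor);
 *	dm_pci_read_config16(dev, PCI_COMMAND, &cmd);
 *	dm_pci_write_config16(dev, PCI_COMMAND, cmd | PCI_COMMAND_IO);
 */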
451
452 int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
453 {
454 u8 val;
455 int ret;
456
457 ret = dm_pci_read_config8(dev, offset, &val);
458 if (ret)
459 return ret;
460 val &= ~clr;
461 val |= set;
462
463 return dm_pci_write_config8(dev, offset, val);
464 }
465
466 int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
467 {
468 u16 val;
469 int ret;
470
471 ret = dm_pci_read_config16(dev, offset, &val);
472 if (ret)
473 return ret;
474 val &= ~clr;
475 val |= set;
476
477 return dm_pci_write_config16(dev, offset, val);
478 }
479
480 int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
481 {
482 u32 val;
483 int ret;
484
485 ret = dm_pci_read_config32(dev, offset, &val);
486 if (ret)
487 return ret;
488 val &= ~clr;
489 val |= set;
490
491 return dm_pci_write_config32(dev, offset, val);
492 }
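/*
 * Example (sketch): the clrset helpers save callers an explicit
 * read-modify-write. Enabling memory decode and bus mastering in one call:
 *
 *	dm_pci_clrset_config16(dev, PCI_COMMAND, 0,
 *			       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 */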
493
494 static void set_vga_bridge_bits(struct udevice *dev)
495 {
496 struct udevice *parent = dev->parent;
497 u16 bc;
498
499 while (parent->seq != 0) {
500 dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
501 bc |= PCI_BRIDGE_CTL_VGA;
502 dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
503 parent = parent->parent;
504 }
505 }
506
507 int pci_auto_config_devices(struct udevice *bus)
508 {
509 struct pci_controller *hose = bus->uclass_priv;
510 struct pci_child_platdata *pplat;
511 unsigned int sub_bus;
512 struct udevice *dev;
513 int ret;
514
515 sub_bus = bus->seq;
516 debug("%s: start\n", __func__);
517 pciauto_config_init(hose);
518 for (ret = device_find_first_child(bus, &dev);
519 !ret && dev;
520 ret = device_find_next_child(&dev)) {
521 unsigned int max_bus;
522 int ret;
523
524 debug("%s: device %s\n", __func__, dev->name);
525 ret = dm_pciauto_config_device(dev);
526 if (ret < 0)
527 return ret;
528 max_bus = ret;
529 sub_bus = max(sub_bus, max_bus);
530
531 pplat = dev_get_parent_platdata(dev);
532 if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
533 set_vga_bridge_bits(dev);
534 }
535 debug("%s: done\n", __func__);
536
537 return sub_bus;
538 }
539
540 int pci_generic_mmap_write_config(
541 struct udevice *bus,
542 int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp),
543 pci_dev_t bdf,
544 uint offset,
545 ulong value,
546 enum pci_size_t size)
547 {
548 void *address;
549
550 if (addr_f(bus, bdf, offset, &address) < 0)
551 return 0;
552
553 switch (size) {
554 case PCI_SIZE_8:
555 writeb(value, address);
556 return 0;
557 case PCI_SIZE_16:
558 writew(value, address);
559 return 0;
560 case PCI_SIZE_32:
561 writel(value, address);
562 return 0;
563 default:
564 return -EINVAL;
565 }
566 }
567
568 int pci_generic_mmap_read_config(
569 struct udevice *bus,
570 int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp),
571 pci_dev_t bdf,
572 uint offset,
573 ulong *valuep,
574 enum pci_size_t size)
575 {
576 void *address;
577
578 if (addr_f(bus, bdf, offset, &address) < 0) {
579 *valuep = pci_get_ff(size);
580 return 0;
581 }
582
583 switch (size) {
584 case PCI_SIZE_8:
585 *valuep = readb(address);
586 return 0;
587 case PCI_SIZE_16:
588 *valuep = readw(address);
589 return 0;
590 case PCI_SIZE_32:
591 *valuep = readl(address);
592 return 0;
593 default:
594 return -EINVAL;
595 }
596 }
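/*
 * Example (sketch): how a memory-mapped (ECAM-style) controller driver
 * might build on the two generic helpers above. The config-window layout
 * and the "my_ecam" names/priv structure are assumptions for illustration,
 * not part of this file.
 *
 *	static int my_ecam_addr(struct udevice *bus, pci_dev_t bdf,
 *				uint offset, void **addrp)
 *	{
 *		struct my_ecam_priv *priv = dev_get_priv(bus);
 *
 *		*addrp = priv->cfg_base + (PCI_BUS(bdf) << 20) +
 *			 (PCI_DEV(bdf) << 15) + (PCI_FUNC(bdf) << 12) + offset;
 *
 *		return 0;
 *	}
 *
 *	static int my_ecam_read_config(struct udevice *bus, pci_dev_t bdf,
 *				       uint offset, ulong *valuep,
 *				       enum pci_size_t size)
 *	{
 *		return pci_generic_mmap_read_config(bus, my_ecam_addr, bdf,
 *						    offset, valuep, size);
 *	}
 */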
597
598 int dm_pci_hose_probe_bus(struct udevice *bus)
599 {
600 int sub_bus;
601 int ret;
602
603 debug("%s\n", __func__);
604
605 sub_bus = pci_get_bus_max() + 1;
606 debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
607 dm_pciauto_prescan_setup_bridge(bus, sub_bus);
608
609 ret = device_probe(bus);
610 if (ret) {
611 debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
612 ret);
613 return ret;
614 }
615 if (sub_bus != bus->seq) {
616 printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
617 __func__, bus->name, bus->seq, sub_bus);
618 return -EPIPE;
619 }
620 sub_bus = pci_get_bus_max();
621 dm_pciauto_postscan_setup_bridge(bus, sub_bus);
622
623 return sub_bus;
624 }
625
626 /**
627 * pci_match_one_id() - Tell if a PCI device id structure has a matching
628 * PCI device id structure
629 * @id: single PCI device id structure to match
630 * @find: the PCI device id structure to match against
631 *
632 * Returns true if @find matches @id (honouring PCI_ANY_ID wildcards and
633 * the class mask), false otherwise.
634 */
635 static bool pci_match_one_id(const struct pci_device_id *id,
636 const struct pci_device_id *find)
637 {
638 if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
639 (id->device == PCI_ANY_ID || id->device == find->device) &&
640 (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
641 (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
642 !((id->class ^ find->class) & id->class_mask))
643 return true;
644
645 return false;
646 }
647
648 /**
649 * pci_find_and_bind_driver() - Find and bind the right PCI driver
650 *
651 * This only looks at the vendor/device, subvendor/subdevice and class fields.
652 *
653 * @parent: Parent bus
654 * @find_id: Specification of the driver to find
655 * @bdf: Bus/device/function address - see PCI_BDF()
656 * @devp: Returns a pointer to the device created
657 * @return 0 if OK, -EPERM if the device is not needed before relocation and
658 * therefore was not created, other -ve value on error
659 */
660 static int pci_find_and_bind_driver(struct udevice *parent,
661 struct pci_device_id *find_id,
662 pci_dev_t bdf, struct udevice **devp)
663 {
664 struct pci_driver_entry *start, *entry;
665 ofnode node = ofnode_null();
666 const char *drv;
667 int n_ents;
668 int ret;
669 char name[30], *str;
670 bool bridge;
671
672 *devp = NULL;
673
674 debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
675 find_id->vendor, find_id->device);
676
677 /* Determine optional OF node */
678 pci_dev_find_ofnode(parent, bdf, &node);
679
680 start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
681 n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
682 for (entry = start; entry != start + n_ents; entry++) {
683 const struct pci_device_id *id;
684 struct udevice *dev;
685 const struct driver *drv;
686
687 for (id = entry->match;
688 id->vendor || id->subvendor || id->class_mask;
689 id++) {
690 if (!pci_match_one_id(id, find_id))
691 continue;
692
693 drv = entry->driver;
694
695 /*
696 * In the pre-relocation phase, we only bind devices
697 * whose driver has the DM_FLAG_PRE_RELOC set, to save
698 * precious memory space, as on some platforms that
699 * space is pretty limited (i.e. when using Cache As RAM).
700 */
701 if (!(gd->flags & GD_FLG_RELOC) &&
702 !(drv->flags & DM_FLAG_PRE_RELOC))
703 return -EPERM;
704
705 /*
706 * We could pass the descriptor to the driver as
707 * platdata (instead of NULL) and allow its bind()
708 * method to return -ENOENT if it doesn't support this
709 * device. That way we could continue the search to
710 * find another driver. For now this doesn't seem
711 * necessary, so just bind the first match.
712 */
713 ret = device_bind_ofnode(parent, drv, drv->name, NULL,
714 node, &dev);
715 if (ret)
716 goto error;
717 debug("%s: Match found: %s\n", __func__, drv->name);
718 dev->driver_data = id->driver_data;
719 *devp = dev;
720 return 0;
721 }
722 }
723
724 bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
725 /*
726 * In the pre-relocation phase, we only bind bridge devices to save
727 * precious memory space, as on some platforms that space is pretty
728 * limited (i.e. when using Cache As RAM).
729 */
730 if (!(gd->flags & GD_FLG_RELOC) && !bridge)
731 return -EPERM;
732
733 /* Bind a generic driver so that the device can be used */
734 sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
735 PCI_FUNC(bdf));
736 str = strdup(name);
737 if (!str)
738 return -ENOMEM;
739 drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";
740
741 ret = device_bind_driver_to_node(parent, drv, str, node, devp);
742 if (ret) {
743 debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
744 free(str);
745 return ret;
746 }
747 debug("%s: No match found: bound generic driver instead\n", __func__);
748
749 return 0;
750
751 error:
752 debug("%s: No match found: error %d\n", __func__, ret);
753 return ret;
754 }
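/*
 * Example (sketch): drivers normally land in the pci_driver_entry linker
 * list searched above via the U_BOOT_PCI_DEVICE() macro from <pci.h>,
 * roughly like this (driver name and IDs made up for illustration):
 *
 *	static struct pci_device_id my_pci_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *
 *	U_BOOT_PCI_DEVICE(my_pci_driver, my_pci_ids);
 */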
755
756 int pci_bind_bus_devices(struct udevice *bus)
757 {
758 ulong vendor, device;
759 ulong header_type;
760 pci_dev_t bdf, end;
761 bool found_multi;
762 int ret;
763
764 found_multi = false;
765 end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1,
766 PCI_MAX_PCI_FUNCTIONS - 1);
767 for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end;
768 bdf += PCI_BDF(0, 0, 1)) {
769 struct pci_child_platdata *pplat;
770 struct udevice *dev;
771 ulong class;
772
773 if (!PCI_FUNC(bdf))
774 found_multi = false;
775 if (PCI_FUNC(bdf) && !found_multi)
776 continue;
777
778 /* Check only the first access; we don't expect problems */
779 ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
780 PCI_SIZE_16);
781 if (ret)
782 goto error;
783
784 if (vendor == 0xffff || vendor == 0x0000)
785 continue;
786
787 pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
788 &header_type, PCI_SIZE_8);
789
790 if (!PCI_FUNC(bdf))
791 found_multi = header_type & 0x80;
792
793 debug("%s: bus %d/%s: found device %x, function %d\n", __func__,
794 bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
795 pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
796 PCI_SIZE_16);
797 pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
798 PCI_SIZE_32);
799 class >>= 8;
800
801 /* Find this device in the device tree */
802 ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
803
804 /* If nothing in the device tree, bind a device */
805 if (ret == -ENODEV) {
806 struct pci_device_id find_id;
807 ulong val;
808
809 memset(&find_id, '\0', sizeof(find_id));
810 find_id.vendor = vendor;
811 find_id.device = device;
812 find_id.class = class;
813 if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
814 pci_bus_read_config(bus, bdf,
815 PCI_SUBSYSTEM_VENDOR_ID,
816 &val, PCI_SIZE_32);
817 find_id.subvendor = val & 0xffff;
818 find_id.subdevice = val >> 16;
819 }
820 ret = pci_find_and_bind_driver(bus, &find_id, bdf,
821 &dev);
822 }
823 if (ret == -EPERM)
824 continue;
825 else if (ret)
826 return ret;
827
828 /* Update the platform data */
829 pplat = dev_get_parent_platdata(dev);
830 pplat->devfn = PCI_MASK_BUS(bdf);
831 pplat->vendor = vendor;
832 pplat->device = device;
833 pplat->class = class;
834 }
835
836 return 0;
837 error:
838 printf("Cannot read bus configuration: %d\n", ret);
839
840 return ret;
841 }
842
843 static void decode_regions(struct pci_controller *hose, ofnode parent_node,
844 ofnode node)
845 {
846 int pci_addr_cells, addr_cells, size_cells;
847 int cells_per_record;
848 const u32 *prop;
849 int len;
850 int i;
851
852 prop = ofnode_get_property(node, "ranges", &len);
853 if (!prop) {
854 debug("%s: Cannot decode regions\n", __func__);
855 return;
856 }
857
858 pci_addr_cells = ofnode_read_simple_addr_cells(node);
859 addr_cells = ofnode_read_simple_addr_cells(parent_node);
860 size_cells = ofnode_read_simple_size_cells(node);
861
862 /* PCI addresses are always 3-cells */
863 len /= sizeof(u32);
864 cells_per_record = pci_addr_cells + addr_cells + size_cells;
865 hose->region_count = 0;
866 debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
867 cells_per_record);
868 for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
869 u64 pci_addr, addr, size;
870 int space_code;
871 u32 flags;
872 int type;
873 int pos;
874
875 if (len < cells_per_record)
876 break;
877 flags = fdt32_to_cpu(prop[0]);
878 space_code = (flags >> 24) & 3;
879 pci_addr = fdtdec_get_number(prop + 1, 2);
880 prop += pci_addr_cells;
881 addr = fdtdec_get_number(prop, addr_cells);
882 prop += addr_cells;
883 size = fdtdec_get_number(prop, size_cells);
884 prop += size_cells;
885 debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
886 __func__, hose->region_count, pci_addr, addr, size, space_code);
887 if (space_code & 2) {
888 type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
889 PCI_REGION_MEM;
890 } else if (space_code & 1) {
891 type = PCI_REGION_IO;
892 } else {
893 continue;
894 }
895
896 if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
897 type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
898 debug(" - beyond the 32-bit boundary, ignoring\n");
899 continue;
900 }
901
902 pos = -1;
903 for (i = 0; i < hose->region_count; i++) {
904 if (hose->regions[i].flags == type)
905 pos = i;
906 }
907 if (pos == -1)
908 pos = hose->region_count++;
909 debug(" - type=%d, pos=%d\n", type, pos);
910 pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
911 }
912
913 /* Add a region for our local memory */
914 #ifdef CONFIG_NR_DRAM_BANKS
915 bd_t *bd = gd->bd;
916
917 if (!bd)
918 return;
919
920 for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
921 if (bd->bi_dram[i].size) {
922 pci_set_region(hose->regions + hose->region_count++,
923 bd->bi_dram[i].start,
924 bd->bi_dram[i].start,
925 bd->bi_dram[i].size,
926 PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
927 }
928 }
929 #else
930 phys_addr_t base = 0, size;
931
932 size = gd->ram_size;
933 #ifdef CONFIG_SYS_SDRAM_BASE
934 base = CONFIG_SYS_SDRAM_BASE;
935 #endif
936 if (gd->pci_ram_top && gd->pci_ram_top < base + size)
937 size = gd->pci_ram_top - base;
938 if (size)
939 pci_set_region(hose->regions + hose->region_count++, base,
940 base, size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
941 #endif
942
943 return;
944 }
945
946 static int pci_uclass_pre_probe(struct udevice *bus)
947 {
948 struct pci_controller *hose;
949
950 debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
951 bus->parent->name);
952 hose = bus->uclass_priv;
953
954 /* For bridges, use the top-level PCI controller */
955 if (!device_is_on_pci_bus(bus)) {
956 hose->ctlr = bus;
957 decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus));
958 } else {
959 struct pci_controller *parent_hose;
960
961 parent_hose = dev_get_uclass_priv(bus->parent);
962 hose->ctlr = parent_hose->bus;
963 }
964 hose->bus = bus;
965 hose->first_busno = bus->seq;
966 hose->last_busno = bus->seq;
967
968 return 0;
969 }
970
971 static int pci_uclass_post_probe(struct udevice *bus)
972 {
973 int ret;
974
975 debug("%s: probing bus %d\n", __func__, bus->seq);
976 ret = pci_bind_bus_devices(bus);
977 if (ret)
978 return ret;
979
980 #ifdef CONFIG_PCI_PNP
981 ret = pci_auto_config_devices(bus);
982 if (ret < 0)
983 return ret;
984 #endif
985
986 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
987 /*
988 * Per Intel FSP specification, we should call FSP notify API to
989 * inform FSP that PCI enumeration has been done so that FSP will
990 * do any necessary initialization as required by the chipset's
991 * BIOS Writer's Guide (BWG).
992 *
993 * Unfortunately we have to put this call here as with driver model,
994 * the enumeration is all done on a lazy basis as needed, so until
995 * something is touched on PCI it won't happen.
996 *
997 * Note we only call this 1) after U-Boot is relocated, and 2)
998 * the root bus has finished probing.
999 */
1000 if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) {
1001 ret = fsp_init_phase_pci();
1002 if (ret)
1003 return ret;
1004 }
1005 #endif
1006
1007 return 0;
1008 }
1009
1010 int pci_get_devfn(struct udevice *dev)
1011 {
1012 struct fdt_pci_addr addr;
1013 int ret;
1014
1015 /* Extract the devfn from fdt_pci_addr */
1016 ret = ofnode_read_pci_addr(dev_ofnode(dev), FDT_PCI_SPACE_CONFIG,
1017 "reg", &addr);
1018 if (ret) {
1019 if (ret != -ENOENT)
1020 return -EINVAL;
1021 }
1022
1023 return addr.phys_hi & 0xff00;
1024 }
1025
1026 static int pci_uclass_child_post_bind(struct udevice *dev)
1027 {
1028 struct pci_child_platdata *pplat;
1029
1030 if (!dev_of_valid(dev))
1031 return 0;
1032
1033 pplat = dev_get_parent_platdata(dev);
1034
1035 /* Extract vendor id and device id if available */
1036 ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);
1037
1038 /* Extract the devfn from fdt_pci_addr */
1039 pplat->devfn = pci_get_devfn(dev);
1040
1041 return 0;
1042 }
1043
1044 static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf,
1045 uint offset, ulong *valuep,
1046 enum pci_size_t size)
1047 {
1048 struct pci_controller *hose = bus->uclass_priv;
1049
1050 return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
1051 }
1052
1053 static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
1054 uint offset, ulong value,
1055 enum pci_size_t size)
1056 {
1057 struct pci_controller *hose = bus->uclass_priv;
1058
1059 return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
1060 }
1061
1062 static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
1063 {
1064 struct udevice *dev;
1065 int ret = 0;
1066
1067 /*
1068 * Scan through all the PCI controllers. On x86 there will only be one
1069 * but that is not necessarily true on other hardware.
1070 */
1071 do {
1072 device_find_first_child(bus, &dev);
1073 if (dev) {
1074 *devp = dev;
1075 return 0;
1076 }
1077 ret = uclass_next_device(&bus);
1078 if (ret)
1079 return ret;
1080 } while (bus);
1081
1082 return 0;
1083 }
1084
1085 int pci_find_next_device(struct udevice **devp)
1086 {
1087 struct udevice *child = *devp;
1088 struct udevice *bus = child->parent;
1089 int ret;
1090
1091 /* First try all the siblings */
1092 *devp = NULL;
1093 while (child) {
1094 device_find_next_child(&child);
1095 if (child) {
1096 *devp = child;
1097 return 0;
1098 }
1099 }
1100
1101 /* We ran out of siblings. Try the next bus */
1102 ret = uclass_next_device(&bus);
1103 if (ret)
1104 return ret;
1105
1106 return bus ? skip_to_next_device(bus, devp) : 0;
1107 }
1108
1109 int pci_find_first_device(struct udevice **devp)
1110 {
1111 struct udevice *bus;
1112 int ret;
1113
1114 *devp = NULL;
1115 ret = uclass_first_device(UCLASS_PCI, &bus);
1116 if (ret)
1117 return ret;
1118
1119 return skip_to_next_device(bus, devp);
1120 }
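/*
 * Example (sketch): walking every bound PCI device in the system with the
 * two iterators above, the same pattern dm_pci_find_class() uses earlier
 * in this file:
 *
 *	struct udevice *dev;
 *
 *	for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev))
 *		printf("%s\n", dev->name);
 */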
1121
1122 ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
1123 {
1124 switch (size) {
1125 case PCI_SIZE_8:
1126 return (value >> ((offset & 3) * 8)) & 0xff;
1127 case PCI_SIZE_16:
1128 return (value >> ((offset & 2) * 8)) & 0xffff;
1129 default:
1130 return value;
1131 }
1132 }
1133
1134 ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
1135 enum pci_size_t size)
1136 {
1137 uint off_mask;
1138 uint val_mask, shift;
1139 ulong ldata, mask;
1140
1141 switch (size) {
1142 case PCI_SIZE_8:
1143 off_mask = 3;
1144 val_mask = 0xff;
1145 break;
1146 case PCI_SIZE_16:
1147 off_mask = 2;
1148 val_mask = 0xffff;
1149 break;
1150 default:
1151 return value;
1152 }
1153 shift = (offset & off_mask) * 8;
1154 ldata = (value & val_mask) << shift;
1155 mask = val_mask << shift;
1156 value = (old & ~mask) | ldata;
1157
1158 return value;
1159 }
1160
1161 int pci_get_regions(struct udevice *dev, struct pci_region **iop,
1162 struct pci_region **memp, struct pci_region **prefp)
1163 {
1164 struct udevice *bus = pci_get_controller(dev);
1165 struct pci_controller *hose = dev_get_uclass_priv(bus);
1166 int i;
1167
1168 *iop = NULL;
1169 *memp = NULL;
1170 *prefp = NULL;
1171 for (i = 0; i < hose->region_count; i++) {
1172 switch (hose->regions[i].flags) {
1173 case PCI_REGION_IO:
1174 if (!*iop || (*iop)->size < hose->regions[i].size)
1175 *iop = hose->regions + i;
1176 break;
1177 case PCI_REGION_MEM:
1178 if (!*memp || (*memp)->size < hose->regions[i].size)
1179 *memp = hose->regions + i;
1180 break;
1181 case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
1182 if (!*prefp || (*prefp)->size < hose->regions[i].size)
1183 *prefp = hose->regions + i;
1184 break;
1185 }
1186 }
1187
1188 return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
1189 }
1190
1191 u32 dm_pci_read_bar32(struct udevice *dev, int barnum)
1192 {
1193 u32 addr;
1194 int bar;
1195
1196 bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1197 dm_pci_read_config32(dev, bar, &addr);
1198 if (addr & PCI_BASE_ADDRESS_SPACE_IO)
1199 return addr & PCI_BASE_ADDRESS_IO_MASK;
1200 else
1201 return addr & PCI_BASE_ADDRESS_MEM_MASK;
1202 }
1203
1204 void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
1205 {
1206 int bar;
1207
1208 bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1209 dm_pci_write_config32(dev, bar, addr);
1210 }
1211
1212 static int _dm_pci_bus_to_phys(struct udevice *ctlr,
1213 pci_addr_t bus_addr, unsigned long flags,
1214 unsigned long skip_mask, phys_addr_t *pa)
1215 {
1216 struct pci_controller *hose = dev_get_uclass_priv(ctlr);
1217 struct pci_region *res;
1218 int i;
1219
1220 if (hose->region_count == 0) {
1221 *pa = bus_addr;
1222 return 0;
1223 }
1224
1225 for (i = 0; i < hose->region_count; i++) {
1226 res = &hose->regions[i];
1227
1228 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
1229 continue;
1230
1231 if (res->flags & skip_mask)
1232 continue;
1233
1234 if (bus_addr >= res->bus_start &&
1235 (bus_addr - res->bus_start) < res->size) {
1236 *pa = (bus_addr - res->bus_start + res->phys_start);
1237 return 0;
1238 }
1239 }
1240
1241 return 1;
1242 }
1243
1244 phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
1245 unsigned long flags)
1246 {
1247 phys_addr_t phys_addr = 0;
1248 struct udevice *ctlr;
1249 int ret;
1250
1251 /* The root controller has the region information */
1252 ctlr = pci_get_controller(dev);
1253
1254 /*
1255 * if PCI_REGION_MEM is set we do a two pass search with preference
1256 * on matches that don't have PCI_REGION_SYS_MEMORY set
1257 */
1258 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
1259 ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
1260 flags, PCI_REGION_SYS_MEMORY,
1261 &phys_addr);
1262 if (!ret)
1263 return phys_addr;
1264 }
1265
1266 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);
1267
1268 if (ret)
1269 puts("pci_hose_bus_to_phys: invalid physical address\n");
1270
1271 return phys_addr;
1272 }
1273
1274 int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
1275 unsigned long flags, unsigned long skip_mask,
1276 pci_addr_t *ba)
1277 {
1278 struct pci_region *res;
1279 struct udevice *ctlr;
1280 pci_addr_t bus_addr;
1281 int i;
1282 struct pci_controller *hose;
1283
1284 /* The root controller has the region information */
1285 ctlr = pci_get_controller(dev);
1286 hose = dev_get_uclass_priv(ctlr);
1287
1288 if (hose->region_count == 0) {
1289 *ba = phys_addr;
1290 return 0;
1291 }
1292
1293 for (i = 0; i < hose->region_count; i++) {
1294 res = &hose->regions[i];
1295
1296 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
1297 continue;
1298
1299 if (res->flags & skip_mask)
1300 continue;
1301
1302 bus_addr = phys_addr - res->phys_start + res->bus_start;
1303
1304 if (bus_addr >= res->bus_start &&
1305 (bus_addr - res->bus_start) < res->size) {
1306 *ba = bus_addr;
1307 return 0;
1308 }
1309 }
1310
1311 return 1;
1312 }
1313
1314 pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
1315 unsigned long flags)
1316 {
1317 pci_addr_t bus_addr = 0;
1318 int ret;
1319
1320 /*
1321 * if PCI_REGION_MEM is set we do a two pass search with preference
1322 * on matches that don't have PCI_REGION_SYS_MEMORY set
1323 */
1324 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
1325 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
1326 PCI_REGION_SYS_MEMORY, &bus_addr);
1327 if (!ret)
1328 return bus_addr;
1329 }
1330
1331 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);
1332
1333 if (ret)
1334 puts("pci_hose_phys_to_bus: invalid physical address\n");
1335
1336 return bus_addr;
1337 }
1338
1339 void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
1340 {
1341 pci_addr_t pci_bus_addr;
1342 u32 bar_response;
1343
1344 /* read BAR address */
1345 dm_pci_read_config32(dev, bar, &bar_response);
1346 pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
1347
1348 /*
1349 * Pass "0" as the length argument to pci_bus_to_virt. The arg
1350 * isn't actually used on any platform because U-Boot assumes a static
1351 * linear mapping. In the future, this could read the BAR size
1352 * and pass that as the size if needed.
1353 */
1354 return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
1355 }
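/*
 * Example (sketch): mapping BAR0 of a memory-space device and reading a
 * register from it. The register offset is made up for illustration.
 *
 *	void *regs;
 *
 *	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
 *	if (regs)
 *		debug("status: %x\n", readl(regs + 0x10));
 */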
1356
1357 static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
1358 {
1359 int ttl = PCI_FIND_CAP_TTL;
1360 u8 id;
1361 u16 ent;
1362
1363 dm_pci_read_config8(dev, pos, &pos);
1364
1365 while (ttl--) {
1366 if (pos < PCI_STD_HEADER_SIZEOF)
1367 break;
1368 pos &= ~3;
1369 dm_pci_read_config16(dev, pos, &ent);
1370
1371 id = ent & 0xff;
1372 if (id == 0xff)
1373 break;
1374 if (id == cap)
1375 return pos;
1376 pos = (ent >> 8);
1377 }
1378
1379 return 0;
1380 }
1381
1382 int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
1383 {
1384 return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT,
1385 cap);
1386 }
1387
1388 int dm_pci_find_capability(struct udevice *dev, int cap)
1389 {
1390 u16 status;
1391 u8 header_type;
1392 u8 pos;
1393
1394 dm_pci_read_config16(dev, PCI_STATUS, &status);
1395 if (!(status & PCI_STATUS_CAP_LIST))
1396 return 0;
1397
1398 dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
1399 if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
1400 pos = PCI_CB_CAPABILITY_LIST;
1401 else
1402 pos = PCI_CAPABILITY_LIST;
1403
1404 return _dm_pci_find_next_capability(dev, pos, cap);
1405 }
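/*
 * Example (sketch): locating a device's PCI Express capability. The
 * capability ID is assumed to come from <pci.h>.
 *
 *	int pos;
 *
 *	pos = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
 *	if (pos)
 *		debug("PCIe capability at config offset %#x\n", pos);
 */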
1406
1407 int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
1408 {
1409 u32 header;
1410 int ttl;
1411 int pos = PCI_CFG_SPACE_SIZE;
1412
1413 /* minimum 8 bytes per capability */
1414 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1415
1416 if (start)
1417 pos = start;
1418
1419 dm_pci_read_config32(dev, pos, &header);
1420 /*
1421 * If we have no capabilities, this is indicated by cap ID,
1422 * cap version and next pointer all being 0.
1423 */
1424 if (header == 0)
1425 return 0;
1426
1427 while (ttl--) {
1428 if (PCI_EXT_CAP_ID(header) == cap)
1429 return pos;
1430
1431 pos = PCI_EXT_CAP_NEXT(header);
1432 if (pos < PCI_CFG_SPACE_SIZE)
1433 break;
1434
1435 dm_pci_read_config32(dev, pos, &header);
1436 }
1437
1438 return 0;
1439 }
1440
1441 int dm_pci_find_ext_capability(struct udevice *dev, int cap)
1442 {
1443 return dm_pci_find_next_ext_capability(dev, 0, cap);
1444 }
1445
1446 UCLASS_DRIVER(pci) = {
1447 .id = UCLASS_PCI,
1448 .name = "pci",
1449 .flags = DM_UC_FLAG_SEQ_ALIAS,
1450 .post_bind = dm_scan_fdt_dev,
1451 .pre_probe = pci_uclass_pre_probe,
1452 .post_probe = pci_uclass_post_probe,
1453 .child_post_bind = pci_uclass_child_post_bind,
1454 .per_device_auto_alloc_size = sizeof(struct pci_controller),
1455 .per_child_platdata_auto_alloc_size =
1456 sizeof(struct pci_child_platdata),
1457 };
1458
1459 static const struct dm_pci_ops pci_bridge_ops = {
1460 .read_config = pci_bridge_read_config,
1461 .write_config = pci_bridge_write_config,
1462 };
1463
1464 static const struct udevice_id pci_bridge_ids[] = {
1465 { .compatible = "pci-bridge" },
1466 { }
1467 };
1468
1469 U_BOOT_DRIVER(pci_bridge_drv) = {
1470 .name = "pci_bridge_drv",
1471 .id = UCLASS_PCI,
1472 .of_match = pci_bridge_ids,
1473 .ops = &pci_bridge_ops,
1474 };
1475
1476 UCLASS_DRIVER(pci_generic) = {
1477 .id = UCLASS_PCI_GENERIC,
1478 .name = "pci_generic",
1479 };
1480
1481 static const struct udevice_id pci_generic_ids[] = {
1482 { .compatible = "pci-generic" },
1483 { }
1484 };
1485
1486 U_BOOT_DRIVER(pci_generic_drv) = {
1487 .name = "pci_generic_drv",
1488 .id = UCLASS_PCI_GENERIC,
1489 .of_match = pci_generic_ids,
1490 };
1491
1492 void pci_init(void)
1493 {
1494 struct udevice *bus;
1495
1496 /*
1497 * Enumerate all known controller devices. Enumeration has the side-
1498 * effect of probing them, so PCIe devices will be enumerated too.
1499 */
1500 for (uclass_first_device(UCLASS_PCI, &bus);
1501 bus;
1502 uclass_next_device(&bus)) {
1503 ;
1504 }
1505 }
1506