// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#include <dm.h>
#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
	defined(CONFIG_ARM)
#include <asm/arch/clock.h>
#endif
#include "pcie_layerscape.h"

DECLARE_GLOBAL_DATA_PTR;

LIST_HEAD(ls_pcie_list);

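/*
 * Accessors for the controller's DBI (internal configuration register)
 * space, which is always accessed little-endian on these SoCs.
 */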
static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
{
	return in_le32(pcie->dbi + offset);
}

static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
		       unsigned int offset)
{
	out_le32(pcie->dbi + offset, value);
}

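/*
 * Accessors for the control registers (LUT or PF control block); their
 * endianness is SoC-dependent and taken from the "big-endian" device
 * tree property.
 */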
static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
{
	if (pcie->big_endian)
		return in_be32(pcie->ctrl + offset);
	else
		return in_le32(pcie->ctrl + offset);
}

static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
			unsigned int offset)
{
	if (pcie->big_endian)
		out_be32(pcie->ctrl + offset, value);
	else
		out_le32(pcie->ctrl + offset, value);
}

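/*
 * Read the current LTSSM (Link Training and Status State Machine) state.
 * LS102xA SoCs expose it in a per-controller PEXMSCPORTSR register;
 * other SoCs report it in the PF debug register.
 */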
static int ls_pcie_ltssm(struct ls_pcie *pcie)
{
	u32 state;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
		state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
	} else {
		state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
	}

	return state;
}

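/* The link is considered up once the LTSSM has reached at least L0 */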
static int ls_pcie_link_up(struct ls_pcie *pcie)
{
	int ltssm;

	ltssm = ls_pcie_ltssm(pcie);
	if (ltssm < LTSSM_PCIE_L0)
		return 0;

	return 1;
}

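/*
 * Select the target bus/device/function for configuration accesses by
 * rewriting the target address of outbound iATU window 0 (Type 0 config
 * cycles) or window 1 (Type 1 config cycles).
 */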
static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
		   PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
}

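/*
 * Program one outbound iATU window: CPU addresses in [phys, phys + size)
 * are translated to bus_addr on the PCIe side, generating TLPs of the
 * given type. All registers are programmed through the indexed viewport.
 */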
static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

/* Use bar match mode and MEM type as default */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}

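/* Dump all outbound iATU windows through debug() for diagnostics */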
static void ls_pcie_dump_atu(struct ls_pcie *pcie)
{
	int i;

	for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
		dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
			   PCIE_ATU_VIEWPORT);
		debug("iATU%d:\n", i);
		debug("\tLOWER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
		debug("\tUPPER PHYS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
		debug("\tLOWER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
		debug("\tUPPER BUS 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
		debug("\tLIMIT 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LIMIT));
		debug("\tCR1 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR1));
		debug("\tCR2 0x%08x\n",
		      dbi_readl(pcie, PCIE_ATU_CR2));
	}
}

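/*
 * Program the RC outbound windows: two windows for CFG0/CFG1 accesses
 * (each covering half of the "config" resource) plus one window per I/O,
 * memory and prefetchable memory region reported by the PCI uclass.
 */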
static void ls_pcie_setup_atu(struct ls_pcie *pcie)
{
	struct pci_region *io, *mem, *pref;
	unsigned long long offset = 0;
	int idx = 0;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		offset = LS1021_PCIE_SPACE_OFFSET +
			 LS1021_PCIE_SPACE_SIZE * pcie->idx;
	}

	/* ATU 0 : OUTBOUND : CFG0 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				 PCIE_ATU_TYPE_CFG0,
				 pcie->cfg_res.start + offset,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);
	/* ATU 1 : OUTBOUND : CFG1 */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				 PCIE_ATU_TYPE_CFG1,
				 pcie->cfg_res.start + offset +
				 fdt_resource_size(&pcie->cfg_res) / 2,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);

	pci_get_regions(pcie->bus, &io, &mem, &pref);
	idx = PCIE_ATU_REGION_INDEX1 + 1;

	/* Fix the pcie memory map for LS2088A series SoCs */
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		if (io)
			io->phys_start = (io->phys_start &
					 (PCIE_PHYS_SIZE - 1)) +
					 LS2088A_PCIE1_PHYS_ADDR +
					 LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (mem)
			mem->phys_start = (mem->phys_start &
					  (PCIE_PHYS_SIZE - 1)) +
					  LS2088A_PCIE1_PHYS_ADDR +
					  LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (pref)
			pref->phys_start = (pref->phys_start &
					   (PCIE_PHYS_SIZE - 1)) +
					   LS2088A_PCIE1_PHYS_ADDR +
					   LS2088A_PCIE_PHYS_SIZE * pcie->idx;
	}

	if (io)
		/* ATU : OUTBOUND : IO */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_IO,
					 io->phys_start + offset,
					 io->bus_start,
					 io->size);

	if (mem)
		/* ATU : OUTBOUND : MEM */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 mem->phys_start + offset,
					 mem->bus_start,
					 mem->size);

	if (pref)
		/* ATU : OUTBOUND : pref */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 pref->phys_start + offset,
					 pref->bus_start,
					 pref->size);

	ls_pcie_dump_atu(pcie);
}

/* Return 0 if the address is valid, -errno if not valid */
static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
{
	struct udevice *bus = pcie->bus;

	if (pcie->mode == PCI_HEADER_TYPE_NORMAL)
		return -ENODEV;

	if (!pcie->enabled)
		return -ENXIO;

	if (PCI_BUS(bdf) < bus->seq)
		return -EINVAL;

	if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
		return -EINVAL;

	if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
		return -EINVAL;

	return 0;
}

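/*
 * Map a config-space access to an MMIO address: accesses to the root bus
 * hit the controller's own DBI registers, the immediate child bus goes
 * through the CFG0 window and any bus beyond it through the CFG1 window,
 * after steering the window to the requested BDF.
 */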
int ls_pcie_conf_address(struct udevice *bus, pci_dev_t bdf,
			 uint offset, void **paddress)
{
	struct ls_pcie *pcie = dev_get_priv(bus);
	u32 busdev;

	if (ls_pcie_addr_valid(pcie, bdf))
		return -EINVAL;

	if (PCI_BUS(bdf) == bus->seq) {
		*paddress = pcie->dbi + offset;
		return 0;
	}

	busdev = PCIE_ATU_BUS(PCI_BUS(bdf) - bus->seq) |
		 PCIE_ATU_DEV(PCI_DEV(bdf)) |
		 PCIE_ATU_FUNC(PCI_FUNC(bdf));

	if (PCI_BUS(bdf) == bus->seq + 1) {
		ls_pcie_cfg0_set_busdev(pcie, busdev);
		*paddress = pcie->cfg0 + offset;
	} else {
		ls_pcie_cfg1_set_busdev(pcie, busdev);
		*paddress = pcie->cfg1 + offset;
	}
	return 0;
}

static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
			       uint offset, ulong *valuep,
			       enum pci_size_t size)
{
	return pci_generic_mmap_read_config(bus, ls_pcie_conf_address,
					    bdf, offset, valuep, size);
}

static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				uint offset, ulong value,
				enum pci_size_t size)
{
	return pci_generic_mmap_write_config(bus, ls_pcie_conf_address,
					     bdf, offset, value, size);
}

/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
}

/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
	writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}

/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;

	val = dbi_readl(pcie, PCIE_STRFMR1);
	val &= 0xDFFFFFFF;
	dbi_writel(pcie, val, PCIE_STRFMR1);
}

/* Disable all BARs in RC mode */
static void ls_pcie_disable_bars(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = in_le32(pcie->dbi + PCIE_SRIOV);

	/*
	 * TODO: For a PCIe controller with SR-IOV, the method to disable BARs
	 * is different and more complex, so it will be added later.
	 */
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
		return;

	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
	dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
}

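/*
 * Root complex bring-up: program the outbound windows, then, with the
 * read-only DBI registers temporarily made writable, fix up the bridge
 * class code and header type and filter message TLPs, and finally hide
 * the RC's own BARs.
 */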
static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}

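/*
 * Endpoint mode: map BAR0/1/2/4 inbound to windows carved out of
 * CONFIG_SYS_PCI_EP_MEMORY_BASE and add one outbound MEM window over the
 * "config" resource so the endpoint can master accesses to host memory.
 */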
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}

/* BAR0 and BAR1 are 32-bit, BAR2 and BAR4 are 64-bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	/* The smallest inbound window is 4KiB */
	if (size < 4 * 1024)
		return;

	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32bit - 4K configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32bit - 8K MSI-X */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64bit - 4K MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64bit - 1M MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

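/* Mark the endpoint configuration as ready in the PF control register */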
static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
{
	ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
}

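/*
 * Endpoint bring-up: on controllers with SR-IOV, program BARs and ATU
 * windows for every PF/VF pair selected through the PF/VF control
 * register; otherwise configure the single function directly. Finally,
 * flag the configuration as ready.
 */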
static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}

	ls_pcie_ep_enable_cfg(pcie);
}

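/*
 * Probe: map the "dbi", "lut"/"ctrl" and "config" regions from the device
 * tree, check whether the SerDes protocol enables this controller, apply
 * the LS2088A-series address fixups, then bring the port up in RC or EP
 * mode depending on the strapped header type and report the link state.
 */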
static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u16 link_sta;
	uint svr;
	int ret;
	fdt_size_t cfg_size;

	pcie->bus = dev;

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: control registers not found\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	/*
	 * Fix the pcie memory map address and PF control registers address
	 * for LS2088A series SoCs
	 */
	svr = get_svr();
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A ||
	    svr == SVR_LS2081A || svr == SVR_LS2041A) {
		cfg_size = fdt_resource_size(&pcie->cfg_res);
		pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
				      LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		pcie->cfg_res.end = pcie->cfg_res.start + cfg_size;
		pcie->ctrl = pcie->lut + 0x40000;
	}

	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

	pcie->mode = readb(pcie->dbi + PCI_HEADER_TYPE) & 0x7f;

	if (pcie->mode == PCI_HEADER_TYPE_NORMAL) {
		printf("PCIe%u: %s %s", pcie->idx, dev->name, "Endpoint");
		ls_pcie_setup_ep(pcie);
	} else {
		printf("PCIe%u: %s %s", pcie->idx, dev->name, "Root Complex");
		ls_pcie_setup_ctrl(pcie);
	}

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}

static const struct dm_pci_ops ls_pcie_ops = {
	.read_config = ls_pcie_read_config,
	.write_config = ls_pcie_write_config,
};

static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};

U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};