// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the ASPEED AST2600 (H2X bridge).
 *
 * Configuration accesses are performed by building PCIe TLP descriptors
 * in the H2X registers, triggering a TX, then polling for completion on
 * either the low ("rc_l") or high ("rc_h") root-complex port.
 */
#include <common.h>
#include <dm.h>
#include <reset.h>
#include <fdtdec.h>
#include <pci.h>
#include <asm/io.h>
#include <asm/arch/ahbc_aspeed.h>
#include "pcie_aspeed.h"

DECLARE_GLOBAL_DATA_PTR;

/* Per-device private data: base of the H2X bridge register block. */
struct pcie_aspeed {
	struct aspeed_h2x_reg *h2x_reg;
};

/*
 * TLP transaction tag shared by all config TX operations.
 * Incremented after every transaction and wrapped with "%= 0x7" before
 * use, so it cycles through 0..6.
 * NOTE(review): "& 0x7" (cycling 0..7) may have been intended instead of
 * "%= 0x7" — confirm against the H2X tag-field width.
 */
static u8 txTag;

/**
 * aspeed_pcie_cfg_read() - Issue a PCIe configuration-read TLP.
 * @pcie:   controller private data
 * @bdf:    target bus/device/function
 * @offset: config-space byte offset (dword-aligned before use)
 * @valuep: output; receives the 32-bit read data, or 0xffffffff on
 *          timeout or completion error
 *
 * Always performs a full 32-bit read; the caller narrows the result
 * (see pcie_aspeed_read_config()).
 */
static void aspeed_pcie_cfg_read(struct pcie_aspeed *pcie, pci_dev_t bdf,
				 uint offset, ulong *valuep)
{
	struct aspeed_h2x_reg *h2x_reg = pcie->h2x_reg;
	u32 timeout = 0;
	u32 bdf_offset;
	u32 type = 0;
	int rx_done_fail = 0;

	//H2X80[4] (unlock) is write-only.
	//Driver may set H2X80/H2XC0[4]=1 before triggering next TX config.
	writel(BIT(4) | readl(&h2x_reg->h2x_rc_l_ctrl),
	       &h2x_reg->h2x_rc_l_ctrl);
	writel(BIT(4) | readl(&h2x_reg->h2x_rc_h_ctrl),
	       &h2x_reg->h2x_rc_h_ctrl);

	/*
	 * Config TLP type: bus 0 is the root bus, reached with a Type 0
	 * request; anything behind a bridge needs Type 1.
	 */
	if (PCI_BUS(bdf) == 0)
		type = 0;
	else
		type = 1;

	/* Pack bus[31:24] / dev[23:19] / func[18:16] / dword-aligned offset. */
	bdf_offset = (PCI_BUS(bdf) << 24) |
			(PCI_DEV(bdf) << 19) |
			(PCI_FUNC(bdf) << 16) |
			(offset & ~3);

	txTag %= 0x7;

	/*
	 * TX descriptor words; presumably TLP header fields (read request,
	 * all byte enables set via the 0xf in desc2) — confirm against the
	 * H2X datasheet.
	 */
	writel(0x04000001 | (type << 24), &h2x_reg->h2x_tx_desc3);
	writel(0x0000200f | (txTag << 8), &h2x_reg->h2x_tx_desc2);
	writel(bdf_offset, &h2x_reg->h2x_tx_desc1);
	writel(0x00000000, &h2x_reg->h2x_tx_desc0);

	//trigger tx
	writel(PCIE_TRIGGER_TX, &h2x_reg->h2x_reg24);

	//wait tx idle
	while (!(readl(&h2x_reg->h2x_reg24) & PCIE_TX_IDLE)) {
		timeout++;
		if (timeout > 1000) {
			/* TX never went idle: report all-ones like an
			 * aborted config read. */
			*valuep = 0xffffffff;
			goto out;
		}
	};

	//write clr tx idle
	writel(1, &h2x_reg->h2x_reg08);

	timeout = 0;
	//check tx status
	switch (readl(&h2x_reg->h2x_reg24) & PCIE_STATUS_OF_TX) {
	case PCIE_RC_L_TX_COMPLETE:
		/* Poll up to ~10ms for the completion on the low RC port. */
		while (!(readl(&h2x_reg->h2x_rc_l_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10) {
				rx_done_fail = 1;
				*valuep = 0xffffffff;
				break;
			}
			mdelay(1);
		}
		if (!rx_done_fail) {
			/* rxdesc2 BIT(13): presumably a completion-error
			 * flag — confirm against the datasheet. */
			if (readl(&h2x_reg->h2x_rc_l_rxdesc2) & BIT(13))
				*valuep = 0xffffffff;
			else
				*valuep = readl(&h2x_reg->h2x_rc_l_rdata);
		}
		/* Re-arm (unlock bit) and ack all pending interrupts. */
		writel(BIT(4) | readl(&h2x_reg->h2x_rc_l_ctrl),
		       &h2x_reg->h2x_rc_l_ctrl);
		writel(readl(&h2x_reg->h2x_rc_l_isr),
		       &h2x_reg->h2x_rc_l_isr);
		break;
	case PCIE_RC_H_TX_COMPLETE:
		/* Same sequence as above, on the high RC port. */
		while (!(readl(&h2x_reg->h2x_rc_h_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10) {
				rx_done_fail = 1;
				*valuep = 0xffffffff;
				break;
			}
			mdelay(1);
		}
		if (!rx_done_fail) {
			if (readl(&h2x_reg->h2x_rc_h_rxdesc2) & BIT(13))
				*valuep = 0xffffffff;
			else
				*valuep = readl(&h2x_reg->h2x_rc_h_rdata);
		}
		writel(BIT(4) | readl(&h2x_reg->h2x_rc_h_ctrl),
		       &h2x_reg->h2x_rc_h_ctrl);
		writel(readl(&h2x_reg->h2x_rc_h_isr), &h2x_reg->h2x_rc_h_isr);
		break;
	default:	//read rc data
		*valuep = readl(&h2x_reg->h2x_rdata);
		break;
	}

out:
	txTag++;
}

/**
 * aspeed_pcie_cfg_write() - Issue a PCIe configuration-write TLP.
 * @pcie:   controller private data
 * @bdf:    target bus/device/function
 * @offset: config-space byte offset
 * @value:  value to write (widened to 32 bits with byte enables)
 * @size:   access width (PCI_SIZE_8/16/32), used to build byte enables
 *
 * Write failures (TX or RX timeout) are silently dropped; there is no
 * status returned to the caller.
 */
static void aspeed_pcie_cfg_write(struct pcie_aspeed *pcie, pci_dev_t bdf,
				  uint offset, ulong value,
				  enum pci_size_t size)
{
	struct aspeed_h2x_reg *h2x_reg = pcie->h2x_reg;
	u32 timeout = 0;
	u32 type = 0;
	u32 bdf_offset;
	u8 byte_en = 0;

	/* Unlock both RC ports before the next TX (see cfg_read). */
	writel(BIT(4) | readl(&h2x_reg->h2x_rc_l_ctrl),
	       &h2x_reg->h2x_rc_l_ctrl);
	writel(BIT(4) | readl(&h2x_reg->h2x_rc_h_ctrl),
	       &h2x_reg->h2x_rc_h_ctrl);

	/*
	 * Derive the TLP first-dword byte enables from the access size
	 * and the offset's position within its dword.
	 */
	switch (size) {
	case PCI_SIZE_8:
		switch (offset % 4) {
		case 0:
			byte_en = 0x1;
			break;
		case 1:
			byte_en = 0x2;
			break;
		case 2:
			byte_en = 0x4;
			break;
		case 3:
			byte_en = 0x8;
			break;
		}
		break;
	case PCI_SIZE_16:
		switch ((offset >> 1) % 2) {
		case 0:
			byte_en = 0x3;
			break;
		case 1:
			byte_en = 0xc;
			break;
		}
		break;
	default:
		/* Full 32-bit write. */
		byte_en = 0xf;
		break;
	}

	/* Type 0 for the root bus, Type 1 behind bridges (see cfg_read). */
	if (PCI_BUS(bdf) == 0)
		type = 0;
	else
		type = 1;

	bdf_offset = (PCI_BUS(bdf) << 24) |
			(PCI_DEV(bdf) << 19) |
			(PCI_FUNC(bdf) << 16) |
			(offset & ~3);

	txTag %= 0x7;

	/* 0x44000001: presumably a config-write TLP header — confirm. */
	writel(0x44000001 | (type << 24), &h2x_reg->h2x_tx_desc3);
	writel(0x00002000 | (txTag << 8) | byte_en, &h2x_reg->h2x_tx_desc2);
	writel(bdf_offset, &h2x_reg->h2x_tx_desc1);
	writel(0x00000000, &h2x_reg->h2x_tx_desc0);

	/* Replicate the narrow value into its lane within the dword. */
	value = pci_conv_size_to_32(0x0, value, offset, size);

	writel(value, &h2x_reg->h2x_tx_data);

	//trigger tx
	/* NOTE(review): cfg_read uses PCIE_TRIGGER_TX / PCIE_TX_IDLE here;
	 * the literal 1 and BIT(31) below are presumably the same values —
	 * confirm and unify with the named constants. */
	writel(1, &h2x_reg->h2x_reg24);

	//wait tx idle
	while (!(readl(&h2x_reg->h2x_reg24) & BIT(31))) {
		timeout++;
		if (timeout > 1000)
			goto out;
	};

	//write clr tx idle
	writel(1, &h2x_reg->h2x_reg08);

	timeout = 0;
	//check tx status and clr rx done int
	/* No default case: if neither port reports completion, the write
	 * is dropped without acknowledgment. */
	switch (readl(&h2x_reg->h2x_reg24) & PCIE_STATUS_OF_TX) {
	case PCIE_RC_L_TX_COMPLETE:
		while (!(readl(&h2x_reg->h2x_rc_l_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10)
				break;
			mdelay(1);
		}
		writel(PCIE_RC_RX_DONE_ISR, &h2x_reg->h2x_rc_l_isr);
		break;
	case PCIE_RC_H_TX_COMPLETE:
		while (!(readl(&h2x_reg->h2x_rc_h_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10)
				break;
			mdelay(1);
		}
		writel(PCIE_RC_RX_DONE_ISR, &h2x_reg->h2x_rc_h_isr);
		break;
	}

out:
	txTag++;
}

/**
 * pcie_aspeed_read_config() - dm_pci_ops.read_config hook.
 * @bus:    PCI controller udevice
 * @bdf:    target bus/device/function
 * @offset: config-space byte offset
 * @valuep: output value, narrowed to @size
 * @size:   access width
 *
 * Return: always 0; unreachable devices read as all-ones.
 */
static int pcie_aspeed_read_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong *valuep,
				   enum pci_size_t size)
{
	struct pcie_aspeed *pcie = dev_get_priv(bus);

	debug("PCIE CFG read: (b,d,f)=(%2d,%2d,%2d)\n",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));

	/* Only allow one other device besides the local one on the local bus */
	if (PCI_BUS(bdf) == 1 && PCI_DEV(bdf) > 0) {
		debug("- out of range\n");
		/*
		 * If local dev is 0, the first other dev can
		 * only be 1
		 */
		*valuep = pci_get_ff(size);
		return 0;
	}

	/* Same single-device restriction for the second downstream bus. */
	if (PCI_BUS(bdf) == 2 && PCI_DEV(bdf) > 0) {
		debug("- out of range\n");
		/*
		 * If local dev is 0, the first other dev can
		 * only be 1
		 */

		*valuep = pci_get_ff(size);
		return 0;
	}

	aspeed_pcie_cfg_read(pcie, bdf, offset, valuep);

	/* Narrow the 32-bit hardware read to the requested width. */
	*valuep = pci_conv_32_to_size(*valuep, offset, size);
	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, *valuep);

	return 0;
}

/**
 * pcie_aspeed_write_config() - dm_pci_ops.write_config hook.
 * @bus:    PCI controller udevice
 * @bdf:    target bus/device/function
 * @offset: config-space byte offset
 * @value:  value to write
 * @size:   access width
 *
 * Return: always 0 (hardware-level write failures are not reported).
 */
static int pcie_aspeed_write_config(struct udevice *bus, pci_dev_t bdf,
				    uint offset, ulong value,
				    enum pci_size_t size)
{
	struct pcie_aspeed *pcie = dev_get_priv(bus);

	debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d) ",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);

	aspeed_pcie_cfg_write(pcie, bdf, offset, value, size);

	return 0;
}

/**
 * aspeed_pcie_set_slot_power_limit() - Send a Set_Slot_Power_Limit message.
 * @pcie: controller private data
 * @slot: 0 for the low RC port (dev 8), 1 for the high RC port (dev 4)
 *
 * Builds and fires a message TLP carrying payload 0x1a (the power-limit
 * value/scale; exact encoding per PCIe spec — not decoded here), then
 * waits for and acknowledges completion. Returns silently on timeout.
 */
void aspeed_pcie_set_slot_power_limit(struct pcie_aspeed *pcie, int slot)
{
	u32 timeout = 0;
	struct aspeed_h2x_reg *h2x_reg = pcie->h2x_reg;

	//optional : set_slot_power_limit
	switch (slot) {
	case 0:
		/* Unlock the low RC port before TX. */
		writel(BIT(4) | readl(&h2x_reg->h2x_rc_l_ctrl),
		       &h2x_reg->h2x_rc_l_ctrl);
		break;
	case 1:
		/* Unlock the high RC port before TX. */
		writel(BIT(4) | readl(&h2x_reg->h2x_rc_h_ctrl),
		       &h2x_reg->h2x_rc_h_ctrl);
		break;
	}

	/* 0x74000001: presumably a message TLP header — confirm. */
	writel(0x74000001, &h2x_reg->h2x_tx_desc3);

	switch (slot) {
	case 0:	//write for 0.8.0
		writel(0x00400050, &h2x_reg->h2x_tx_desc2);
		break;
	case 1:	//write for 0.4.0
		writel(0x00200050, &h2x_reg->h2x_tx_desc2);
		break;
	}

	writel(0x0, &h2x_reg->h2x_tx_desc1);
	writel(0x0, &h2x_reg->h2x_tx_desc0);

	writel(0x1a, &h2x_reg->h2x_tx_data);

	//trigger tx
	writel(PCIE_TRIGGER_TX, &h2x_reg->h2x_reg24);

	//wait tx idle
	while (!(readl(&h2x_reg->h2x_reg24) & BIT(31))) {
		timeout++;
		if (timeout > 1000)
			return;
	};

	//write clr tx idle
	writel(1, &h2x_reg->h2x_reg08);
	timeout = 0;

	switch (slot) {
	case 0:
		//check tx status and clr rx done int
		while (!(readl(&h2x_reg->h2x_rc_l_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10)
				break;
			mdelay(1);
		}
		writel(PCIE_RC_RX_DONE_ISR, &h2x_reg->h2x_rc_l_isr);
		break;
	case 1:
		//check tx status and clr rx done int
		while (!(readl(&h2x_reg->h2x_rc_h_isr) & PCIE_RC_RX_DONE_ISR)) {
			timeout++;
			if (timeout > 10)
				break;
			mdelay(1);
		}
		writel(PCIE_RC_RX_DONE_ISR, &h2x_reg->h2x_rc_h_isr);
		break;
	}
}

/**
 * aspeed_pcie_rc_slot_enable() - Enable one root-complex port for RX.
 * @pcie: controller private data
 * @slot: 0 for the low RC port, 1 for the high RC port
 */
void aspeed_pcie_rc_slot_enable(struct pcie_aspeed *pcie, int slot)

{
	struct aspeed_h2x_reg *h2x_reg = pcie->h2x_reg;

	switch (slot) {
	case 0:
		//rc_l
		writel(PCIE_RX_LINEAR | PCIE_RX_MSI_EN |
		       PCIE_WAIT_RX_TLP_CLR |
		       PCIE_RC_RX_ENABLE | PCIE_RC_ENABLE,
		       &h2x_reg->h2x_rc_l_ctrl);
		//assign debug tx tag
		/* Uses the ctrl register's own address as the debug tag
		 * value; 32-bit-target-only pointer cast. */
		writel((u32)&h2x_reg->h2x_rc_l_ctrl, &h2x_reg->h2x_rc_l_tx_tag);
		break;
	case 1:
		//rc_h
		writel(PCIE_RX_LINEAR | PCIE_RX_MSI_EN |
		       PCIE_WAIT_RX_TLP_CLR |
		       PCIE_RC_RX_ENABLE | PCIE_RC_ENABLE,
		       &h2x_reg->h2x_rc_h_ctrl);
		//assign debug tx tag
		writel((u32)&h2x_reg->h2x_rc_h_ctrl, &h2x_reg->h2x_rc_h_tx_tag);
		break;
	}
}

/**
 * pcie_aspeed_probe() - Driver-model probe hook.
 * @dev: controller udevice
 *
 * Resets the controller, enables AHBC remapping, initializes the H2X
 * bridge windows, then for each "slotN-handle" phandle found in the DT
 * enables the corresponding RC port and — if its bridge reports a link —
 * sends the slot-power-limit message.
 *
 * Return: 0 on success, negative error from reset/ahbc lookup otherwise.
 */
static int pcie_aspeed_probe(struct udevice *dev)
{
	void *fdt = (void *)gd->fdt_blob;
	struct reset_ctl reset_ctl;
	struct pcie_aspeed *pcie = (struct pcie_aspeed *)dev_get_priv(dev);
	struct aspeed_h2x_reg *h2x_reg = pcie->h2x_reg;
	struct udevice *ahbc_dev, *slot0_dev, *slot1_dev;
	int slot0_of_handle, slot1_of_handle;
	int ret = 0;

	txTag = 0;
	ret = reset_get_by_index(dev, 0, &reset_ctl);
	if (ret) {
		printf("%s(): Failed to get reset signal\n", __func__);
		return ret;
	}

	reset_assert(&reset_ctl);
	mdelay(100);
	reset_deassert(&reset_ctl);

	ret = uclass_get_device_by_driver
		(UCLASS_MISC, DM_GET_DRIVER(aspeed_ahbc), &ahbc_dev);
	if (ret) {
		debug("ahbc device not defined\n");
		return ret;
	}
	aspeed_ahbc_remap_enable(devfdt_get_addr_ptr(ahbc_dev));

	//init
	writel(0x1, &h2x_reg->h2x_reg00);

	//ahb to pcie rc
	/* Window base/mask values; presumably map AHB 0xe0006000 onto the
	 * RC config space — confirm against the AST2600 datasheet. */
	writel(0xe0006000, &h2x_reg->h2x_reg60);
	writel(0x0, &h2x_reg->h2x_reg64);
	writel(0xFFFFFFFF, &h2x_reg->h2x_reg68);

	slot0_of_handle =
		fdtdec_lookup_phandle(fdt, dev_of_offset(dev), "slot0-handle");
	if (slot0_of_handle) {
		aspeed_pcie_rc_slot_enable(pcie, 0);
		/* Missing slot device is not fatal: fall through to slot1. */
		if (uclass_get_device_by_of_offset
			(UCLASS_MISC, slot0_of_handle, &slot0_dev))
			goto slot1;
		if (aspeed_rc_bridge_link_status(slot0_dev))
			aspeed_pcie_set_slot_power_limit(pcie, 0);
	}

slot1:
	slot1_of_handle =
		fdtdec_lookup_phandle(fdt, dev_of_offset(dev), "slot1-handle");
	if (slot1_of_handle) {
		aspeed_pcie_rc_slot_enable(pcie, 1);
		if (uclass_get_device_by_of_offset
			(UCLASS_MISC, slot1_of_handle, &slot1_dev))
			goto end;
		if (aspeed_rc_bridge_link_status(slot1_dev))
			aspeed_pcie_set_slot_power_limit(pcie, 1);
	}

end:
	return 0;
}

/**
 * pcie_aspeed_ofdata_to_platdata() - Read the controller base from the DT.
 * @dev: controller udevice
 *
 * Return: always 0 (an invalid "reg" yields FDT_ADDR_T_NONE, unchecked).
 */
static int pcie_aspeed_ofdata_to_platdata(struct udevice *dev)
{
	struct pcie_aspeed *pcie = dev_get_priv(dev);

	/* Get the controller base address */
	pcie->h2x_reg = (void *)devfdt_get_addr_index(dev, 0);

	return 0;
}

/* Config-space accessors exposed to the PCI uclass. */
static const struct dm_pci_ops pcie_aspeed_ops = {
	.read_config	= pcie_aspeed_read_config,
	.write_config	= pcie_aspeed_write_config,
};

static const struct udevice_id pcie_aspeed_ids[] = {
	{ .compatible = "aspeed,ast2600-pcie" },
	{ }
};

U_BOOT_DRIVER(pcie_aspeed) = {
	.name			= "pcie_aspeed",
	.id			= UCLASS_PCI,
	.of_match		= pcie_aspeed_ids,
	.ops			= &pcie_aspeed_ops,
	.ofdata_to_platdata	= pcie_aspeed_ofdata_to_platdata,
	.probe			= pcie_aspeed_probe,
	.priv_auto_alloc_size	= sizeof(struct pcie_aspeed),
};