// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
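
/*
 * Example (illustrative, not part of the driver): a caller that needs the
 * offset of the PCI Express capability in the controller's own config
 * space would do:
 *
 *	u8 pcie_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 *
 *	if (!pcie_cap)
 *		dev_warn(pci->dev, "PCIe capability not found\n");
 *
 * A return value of 0 means the capability is absent; the recursive walk
 * above terminates on a zero next pointer or an out-of-range capability ID.
 */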

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi2)
		return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);

	ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI2 address failed\n");

	return val;
}

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI2 address failed\n");
}

u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, size);

	ret = dw_pcie_read(pci->atu_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}
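
/*
 * Note: the fixed-width helpers used throughout the rest of this file
 * (dw_pcie_readl_dbi(), dw_pcie_writel_dbi(), dw_pcie_readl_atu(), ...)
 * are thin wrappers around the sized accessors above, declared in
 * pcie-designware.h. For example, a 32-bit DBI write reduces to:
 *
 *	dw_pcie_write_dbi(pci, reg, 0x4, val);
 */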

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
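
/*
 * Illustrative sketch (addresses and sizes are hypothetical): mapping
 * outbound region 0 as a 64 KB MEM window from CPU address 0x40000000
 * to PCI address 0x0 would be:
 *
 *	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
 *				  PCIE_ATU_TYPE_MEM, 0x40000000,
 *				  0x0, 0x10000);
 *
 * Host drivers use the same call with PCIE_ATU_TYPE_CFG0/CFG1 or
 * PCIE_ATU_TYPE_IO to retarget a window for config or I/O cycles.
 */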

static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
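
/*
 * Illustrative sketch (hypothetical buffer address): in endpoint mode,
 * steering inbound transactions that hit BAR0 to a local buffer would
 * look like:
 *
 *	ret = dw_pcie_prog_inbound_atu(pci, PCIE_ATU_REGION_INDEX0,
 *				       BAR_0, buf_phys, DW_PCIE_AS_MEM);
 *	if (ret)
 *		return ret;
 *
 * where buf_phys is the CPU-visible address backing the BAR.
 */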

void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_info(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 reg, val;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	reg &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
		reg &= ~PCI_EXP_LNKCTL2_HASD;
		reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);

void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_N_FTS_MASK;
	val |= n_fts & PORT_LOGIC_N_FTS_MASK;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
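
/*
 * Illustrative bring-up order in a glue driver (a sketch only; the LTSSM
 * enable step and the Gen/N_FTS values below are platform-specific
 * assumptions, not requirements of this core):
 *
 *	dw_pcie_link_set_max_speed(pci, 3);
 *	dw_pcie_link_set_n_fts(pci, 0x32);
 *	... assert LTSSM enable in the SoC's application registers ...
 *	return dw_pcie_wait_for_link(pci);
 */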

void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret) {
		dev_dbg(pci->dev, "property num-lanes isn't found\n");
		return;
	}

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}
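
/*
 * Illustrative device tree fragment exercising the properties parsed
 * above (the node name and compatible are hypothetical; the property
 * names are the ones this function reads):
 *
 *	pcie@40000000 {
 *		compatible = "snps,dw-pcie";
 *		num-lanes = <4>;
 *		snps,enable-cdm-check;
 *	};
 */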