1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Synopsys DesignWare PCIe host controller driver 4 * 5 * Copyright (C) 2013 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com 7 * 8 * Author: Jingoo Han <jg1.han@samsung.com> 9 */ 10 11 #include <linux/delay.h> 12 #include <linux/of.h> 13 #include <linux/types.h> 14 15 #include "../../pci.h" 16 #include "pcie-designware.h" 17 18 /* 19 * These interfaces resemble the pci_find_*capability() interfaces, but these 20 * are for configuring host controllers, which are bridges *to* PCI devices but 21 * are not PCI devices themselves. 22 */ 23 static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, 24 u8 cap) 25 { 26 u8 cap_id, next_cap_ptr; 27 u16 reg; 28 29 if (!cap_ptr) 30 return 0; 31 32 reg = dw_pcie_readw_dbi(pci, cap_ptr); 33 cap_id = (reg & 0x00ff); 34 35 if (cap_id > PCI_CAP_ID_MAX) 36 return 0; 37 38 if (cap_id == cap) 39 return cap_ptr; 40 41 next_cap_ptr = (reg & 0xff00) >> 8; 42 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); 43 } 44 45 u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap) 46 { 47 u8 next_cap_ptr; 48 u16 reg; 49 50 reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); 51 next_cap_ptr = (reg & 0x00ff); 52 53 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap); 54 } 55 EXPORT_SYMBOL_GPL(dw_pcie_find_capability); 56 57 static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start, 58 u8 cap) 59 { 60 u32 header; 61 int ttl; 62 int pos = PCI_CFG_SPACE_SIZE; 63 64 /* minimum 8 bytes per capability */ 65 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 66 67 if (start) 68 pos = start; 69 70 header = dw_pcie_readl_dbi(pci, pos); 71 /* 72 * If we have no capabilities, this is indicated by cap ID, 73 * cap version and next pointer all being 0. 
74 */ 75 if (header == 0) 76 return 0; 77 78 while (ttl-- > 0) { 79 if (PCI_EXT_CAP_ID(header) == cap && pos != start) 80 return pos; 81 82 pos = PCI_EXT_CAP_NEXT(header); 83 if (pos < PCI_CFG_SPACE_SIZE) 84 break; 85 86 header = dw_pcie_readl_dbi(pci, pos); 87 } 88 89 return 0; 90 } 91 92 u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) 93 { 94 return dw_pcie_find_next_ext_capability(pci, 0, cap); 95 } 96 EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); 97 98 int dw_pcie_read(void __iomem *addr, int size, u32 *val) 99 { 100 if (!IS_ALIGNED((uintptr_t)addr, size)) { 101 *val = 0; 102 return PCIBIOS_BAD_REGISTER_NUMBER; 103 } 104 105 if (size == 4) { 106 *val = readl(addr); 107 } else if (size == 2) { 108 *val = readw(addr); 109 } else if (size == 1) { 110 *val = readb(addr); 111 } else { 112 *val = 0; 113 return PCIBIOS_BAD_REGISTER_NUMBER; 114 } 115 116 return PCIBIOS_SUCCESSFUL; 117 } 118 EXPORT_SYMBOL_GPL(dw_pcie_read); 119 120 int dw_pcie_write(void __iomem *addr, int size, u32 val) 121 { 122 if (!IS_ALIGNED((uintptr_t)addr, size)) 123 return PCIBIOS_BAD_REGISTER_NUMBER; 124 125 if (size == 4) 126 writel(val, addr); 127 else if (size == 2) 128 writew(val, addr); 129 else if (size == 1) 130 writeb(val, addr); 131 else 132 return PCIBIOS_BAD_REGISTER_NUMBER; 133 134 return PCIBIOS_SUCCESSFUL; 135 } 136 EXPORT_SYMBOL_GPL(dw_pcie_write); 137 138 u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size) 139 { 140 int ret; 141 u32 val; 142 143 if (pci->ops->read_dbi) 144 return pci->ops->read_dbi(pci, pci->dbi_base, reg, size); 145 146 ret = dw_pcie_read(pci->dbi_base + reg, size, &val); 147 if (ret) 148 dev_err(pci->dev, "Read DBI address failed\n"); 149 150 return val; 151 } 152 EXPORT_SYMBOL_GPL(dw_pcie_read_dbi); 153 154 void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val) 155 { 156 int ret; 157 158 if (pci->ops->write_dbi) { 159 pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val); 160 return; 161 } 162 163 ret = 
dw_pcie_write(pci->dbi_base + reg, size, val); 164 if (ret) 165 dev_err(pci->dev, "Write DBI address failed\n"); 166 } 167 EXPORT_SYMBOL_GPL(dw_pcie_write_dbi); 168 169 u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size) 170 { 171 int ret; 172 u32 val; 173 174 if (pci->ops->read_dbi2) 175 return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size); 176 177 ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val); 178 if (ret) 179 dev_err(pci->dev, "read DBI address failed\n"); 180 181 return val; 182 } 183 184 void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) 185 { 186 int ret; 187 188 if (pci->ops->write_dbi2) { 189 pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val); 190 return; 191 } 192 193 ret = dw_pcie_write(pci->dbi_base2 + reg, size, val); 194 if (ret) 195 dev_err(pci->dev, "write DBI address failed\n"); 196 } 197 198 u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size) 199 { 200 int ret; 201 u32 val; 202 203 if (pci->ops->read_dbi) 204 return pci->ops->read_dbi(pci, pci->atu_base, reg, size); 205 206 ret = dw_pcie_read(pci->atu_base + reg, size, &val); 207 if (ret) 208 dev_err(pci->dev, "Read ATU address failed\n"); 209 210 return val; 211 } 212 213 void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val) 214 { 215 int ret; 216 217 if (pci->ops->write_dbi) { 218 pci->ops->write_dbi(pci, pci->atu_base, reg, size, val); 219 return; 220 } 221 222 ret = dw_pcie_write(pci->atu_base + reg, size, val); 223 if (ret) 224 dev_err(pci->dev, "Write ATU address failed\n"); 225 } 226 227 static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) 228 { 229 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); 230 231 return dw_pcie_readl_atu(pci, offset + reg); 232 } 233 234 static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, 235 u32 val) 236 { 237 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); 238 239 dw_pcie_writel_atu(pci, offset + reg, val); 240 } 
/*
 * Program outbound iATU region @index using the "unrolled" register layout
 * (one register window per region instead of the shared viewport).  Maps the
 * CPU address range [@cpu_addr, @cpu_addr + @size - 1] to @pci_addr with TLP
 * type @type, then polls until the enable bit reads back set.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	/* Base/limit/target must be programmed before CTRL2 enables the region */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

/*
 * Program outbound iATU region @index.  Applies the platform's optional
 * cpu_addr_fixup translation first, then dispatches to the unrolled layout
 * or falls back to the shared-viewport register layout.
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	/* Select the region in the viewport; later writes address that region */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

/* Read an inbound-region register in the unrolled iATU layout. */
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

/* Write an inbound-region register in the unrolled iATU layout. */
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

/*
 * Program inbound iATU region @index (unrolled layout) in BAR-match mode:
 * traffic hitting BAR @bar is translated to CPU address @cpu_addr, with the
 * TLP type derived from @as_type.  Returns 0 on success, -EINVAL for an
 * unknown @as_type, -EBUSY if the enable bit never reads back set.
 */
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	/* bar << 8: BAR number field of CTRL2 alongside BAR-match mode enable */
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

/*
 * Program inbound iATU region @index in BAR-match mode, dispatching to the
 * unrolled layout or the shared-viewport layout.  Returns 0 on success,
 * -EINVAL for an unknown @as_type, -EBUSY if the enable never latched.
 */
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	/* bar << 8: BAR number field of CR2 alongside BAR-match mode enable */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

/*
 * Disable iATU region @index of the given direction by clearing the enable
 * bit via the viewport-layout CR2 register.  Silently ignores an unknown
 * @type.  NOTE(review): only the viewport layout is handled here — confirm
 * callers on unroll-mode hardware don't rely on this path.
 */
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	/* Write the complement of ENABLE, i.e. every bit except the enable bit */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}

/*
 * Poll dw_pcie_link_up() until the link trains or LINK_WAIT_MAX_RETRIES
 * attempts elapse.  Returns 0 on link up, -ETIMEDOUT otherwise.
 */
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_info(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

/*
 * Report link state: nonzero when the PHY reports link up and link training
 * is not in progress.  Platforms may override via the ->link_up hook.
 */
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	/* Raw readl here, bypassing the dw_pcie_read_dbi() hook path */
	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

/* Advertise upconfigure (lane-width increase) support in the port logic. */
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

/*
 * Cap the link's target speed (LNKCTL2 TLS field) at generation @link_gen.
 * An unrecognized generation falls back to the maximum speed the hardware
 * advertises in LNKCAP.
 *
 * NOTE(review): @link_gen is used unchecked as an index into
 * pcie_link_speed[] — callers must pass a valid generation; verify at the
 * call sites.
 */
void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 reg, val;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	reg &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
		reg &= ~PCI_EXP_LNKCTL2_HASD;
		reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);

/* Set the N_FTS (fast training sequence count) field in the port logic. */
void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_N_FTS_MASK;
	val |= n_fts & PORT_LOGIC_N_FTS_MASK;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);

/*
 * Detect the unrolled iATU layout: on such hardware the legacy viewport
 * register does not exist and reads back as all-ones.  Returns 1 for
 * unrolled, 0 for viewport mode.
 */
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

/*
 * One-time core setup: detect iATU mode (unrolled on IP version >= 4.80a or
 * when probing says so), then program lane count and link-width control from
 * the DT "num-lanes" property, and optionally start the CDM register
 * integrity check.  Returns early (silently, via dev_dbg/dev_err) when
 * "num-lanes" is absent or invalid.
 */
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	/* version == 0 means unknown: fall back to probing the viewport reg */
	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");


	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret) {
		dev_dbg(pci->dev, "property num-lanes isn't found\n");
		return;
	}

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	/* No default case needed: invalid lane counts already returned above */
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Optional CDM (configuration-dependent module) register check */
	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}