// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
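
/*
 * Typical use (illustrative): callers look up a capability offset once and
 * then address DBI reads/writes relative to it, as in
 * dw_pcie_link_set_max_speed() later in this file:
 *
 *	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 *	u32 cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 */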

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);
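
/*
 * The dw_pcie_{read,write}_dbi*() helpers below are the single funnel for
 * DBI register space: vendor glue that needs special access cycles can
 * override them through dw_pcie_ops, otherwise they fall back to plain
 * MMIO via dw_pcie_read()/dw_pcie_write() above. Illustrative read of the
 * Vendor/Device ID dword through the 32-bit wrapper from pcie-designware.h:
 *
 *	u32 id = dw_pcie_readl_dbi(pci, PCI_VENDOR_ID);
 */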

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI2 address failed\n");
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

	ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}
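
/*
 * The iATU comes in two layouts: older cores expose a single set of region
 * registers selected through the PCIE_ATU_VIEWPORT index, while cores from
 * version 4.80a onwards can "unroll" the iATU into a flat memory-mapped
 * block (atu_base) with a fixed stride per region. The *_unroll() helpers
 * below handle the latter; the programming entry points pick the right
 * flavour based on pci->iatu_unroll_enabled.
 */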

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type | PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
					int index, int type, u64 cpu_addr,
					u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
						 cpu_addr, pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	__dw_pcie_prog_outbound_atu(pci, 0, index, type,
				    cpu_addr, pci_addr, size);
}

void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				  int type, u64 cpu_addr, u64 pci_addr,
				  u32 size)
{
	__dw_pcie_prog_outbound_atu(pci, func_no, index, type,
				    cpu_addr, pci_addr, size);
}
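
/*
 * Illustrative outbound mapping (hypothetical addresses): route CPU
 * accesses to a 1 MiB window at 0x60000000 through region 1 as PCI memory
 * transactions targeting the same bus address:
 *
 *	dw_pcie_prog_outbound_atu(pci, 1, PCIE_ATU_TYPE_MEM,
 *				  0x60000000, 0x60000000, SZ_1M);
 *
 * The host core uses the same call with PCIE_ATU_TYPE_CFG0/CFG1 to tunnel
 * configuration accesses to devices below the bridge.
 */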

static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					   int index, int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
				 PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_FUNC_NUM_MATCH_EN |
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
			     int bar, u64 cpu_addr,
			     enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_FUNC_NUM_MATCH_EN |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
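
/*
 * Illustrative inbound mapping (endpoint mode, hypothetical values): back
 * BAR 0 of function 0 with a local buffer so host accesses to that BAR
 * land in local memory. BAR-match mode keys on the BAR index rather than
 * on an address range; free_win and buf_phys_addr are placeholders here:
 *
 *	dw_pcie_prog_inbound_atu(pci, 0, free_win, BAR_0,
 *				 buf_phys_addr, DW_PCIE_AS_MEM);
 */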

void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_info(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 cap, ctrl2, link_speed;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

	cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}
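
/*
 * Heuristic for cores that do not report a version: on unrolled-iATU
 * cores the legacy PCIE_ATU_VIEWPORT register no longer exists, so a read
 * of it comes back as all ones.
 */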

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

void dw_pcie_setup(struct dw_pcie *pci)
{
	u32 val;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);

	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base =
			    devm_platform_ioremap_resource_byname(pdev, "atu");
		if (IS_ERR(pci->atu_base))
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");

	if (pci->link_gen > 0)
		dw_pcie_link_set_max_speed(pci, pci->link_gen);

	/* Configure Gen1 N_FTS */
	if (pci->n_fts[0]) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
		val |= PORT_AFR_N_FTS(pci->n_fts[0]);
		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
	}

	/* Configure Gen2+ N_FTS (n_fts[1] covers all Gen2+ speeds) */
	if (pci->n_fts[1]) {
		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		val &= ~PORT_LOGIC_N_FTS_MASK;
		val |= pci->n_fts[1];
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
	}

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val |= PORT_LINK_DLL_LINK_EN;
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	of_property_read_u32(np, "num-lanes", &pci->num_lanes);
	if (!pci->num_lanes) {
		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
		return;
	}

	/* Set the number of lanes */
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val &= ~PORT_LINK_MODE_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}