// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "pcie-designware.h"

/*
 * Read a naturally aligned MMIO register of @size bytes (1, 2 or 4) into
 * *@val.  Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_BAD_REGISTER_NUMBER (with
 * *@val zeroed) when @addr is misaligned for @size or @size is unsupported.
 */
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Write @val to a naturally aligned MMIO register of @size bytes (1, 2 or 4).
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_BAD_REGISTER_NUMBER when @addr is
 * misaligned for @size or @size is unsupported.
 */
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * DBI register read.  A platform-specific ->read_dbi() hook, when provided,
 * fully overrides the generic access; otherwise fall back to a plain MMIO
 * read at @base + @reg.  On a failed generic read an error is logged and the
 * (uninitialized) value is returned as-is.
 */
u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
		       size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, size);

	ret = dw_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}

/*
 * DBI register write.  Mirrors __dw_pcie_read_dbi(): the optional
 * ->write_dbi() hook takes precedence; otherwise do a plain MMIO write and
 * log (but do not propagate) any failure.
 */
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
			 size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}

/*
 * DBI2 (shadow register space) read.  Same override-then-fallback pattern as
 * __dw_pcie_read_dbi(), using the optional ->read_dbi2() hook.
 */
u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
			size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi2)
		return pci->ops->read_dbi2(pci, base, reg, size);

	ret = dw_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "read DBI address failed\n");

	return val;
}

/*
 * DBI2 (shadow register space) write.  Same override-then-fallback pattern
 * as __dw_pcie_write_dbi(), using the optional ->write_dbi2() hook.
 */
void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
			  size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "write DBI address failed\n");
}

/* Read an outbound iATU register in "unroll" mode: per-region flat layout. */
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

/* Write an outbound iATU register in "unroll" mode: per-region flat layout. */
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

/*
 * Program outbound iATU region @index (unroll register layout) to translate
 * the CPU range [@cpu_addr, @cpu_addr + @size) to @pci_addr with TLP type
 * @type, then enable the region.  Note that only the lower 32 bits of the
 * limit are programmed here, so @size is a u32.  The enable bit is written
 * last and polled back so the translation is live before the caller issues
 * config/IO accesses through it.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

/*
 * Program outbound iATU region @index to translate the CPU range
 * [@cpu_addr, @cpu_addr + @size) to @pci_addr with TLP type @type.
 * Applies the optional ->cpu_addr_fixup() hook first, then dispatches to
 * the unroll-layout path or the legacy viewport-indexed register layout.
 * In viewport mode the region registers are shadowed: the viewport select
 * write must precede the region register writes, and the CR2 enable write
 * must come last.
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

/* Read an inbound iATU register in "unroll" mode: per-region flat layout. */
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

/* Write an inbound iATU register in "unroll" mode: per-region flat layout. */
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

/*
 * Program inbound iATU region @index (unroll register layout) in BAR-match
 * mode: TLPs hitting @bar are translated to @cpu_addr.  @as_type selects the
 * MEM or IO TLP type.  Returns 0 on success, -EINVAL for an unknown
 * @as_type, -EBUSY if the enable bit never reads back set.
 * NOTE(review): the (bar << 8) shift presumably places @bar in CTRL2's BAR
 * number field — confirm against the iATU register definition.
 */
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

/*
 * Program inbound iATU region @index in BAR-match mode so TLPs hitting @bar
 * are translated to @cpu_addr.  Dispatches to the unroll path or the legacy
 * viewport-indexed layout (viewport select first, CR2 enable last — see
 * dw_pcie_prog_outbound_atu()).  Returns 0 on success, -EINVAL for an
 * unknown @as_type, -EBUSY if the enable bit never reads back set.
 */
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

/*
 * Disable iATU region @index of the given direction by clearing the enable
 * bit in CR2 through the legacy viewport interface.  Silently ignores an
 * unknown @type.  Note CR2 is written with ~PCIE_ATU_ENABLE rather than 0,
 * i.e. all bits except the enable bit end up set.
 * NOTE(review): this always uses the viewport registers, with no unroll-mode
 * branch like the programming paths have — verify that is intentional for
 * unroll-capable controllers.
 */
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}

/*
 * Poll dw_pcie_link_up() until the link trains or LINK_WAIT_MAX_RETRIES
 * sleep intervals elapse.  Returns 0 on link up, -ETIMEDOUT otherwise.
 * Sleeps between polls, so this must be called from sleepable context.
 */
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}

/*
 * Report link state: nonzero when the link is up.  A platform ->link_up()
 * hook takes precedence; otherwise the link is considered up when DEBUG1
 * reports link-up and training is no longer in progress.
 */
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

/*
 * Probe for the unrolled iATU register layout.  Returns 1 when the viewport
 * register reads back as all-ones, 0 otherwise.
 * NOTE(review): presumably all-ones means the legacy viewport register is
 * absent and the unroll space must be used — confirm against the databook.
 */
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

/*
 * One-time core setup: detect the iATU register layout and program the lane
 * count.  Unroll mode is assumed for core version >= 4.80a, or probed when
 * no version is recorded; in unroll mode atu_base defaults to
 * dbi_base + DEFAULT_DBI_ATU_OFFSET unless the platform set it.  The lane
 * count comes from the DT "num-lanes" property (0 when absent) and is
 * written into both the link-control and link-width/speed registers; any
 * value other than 1/2/4/8 — including the property being absent — logs an
 * error and leaves the registers untouched.
 */
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");


	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}