// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2009 - 2019 Broadcom */

/*
 * Driver for the Broadcom STB (and bcm2711 / Raspberry Pi 4) PCIe
 * root-complex controller.  Register groups used below:
 *   PCIE_RC_CFG_*  - shadow of the RC's own config space
 *   PCIE_MISC_*    - windows, MSI target, status/control
 *   PCIE_MSI_*     - internal MSI interrupt controller
 *   PCIE_EXT_*     - indirect downstream config-space access
 *   PCIE_RGR1_*    - bridge reset / PERST# control
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../pci.h"

/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
#define BRCM_PCIE_CAP_REGS				0x00ac

/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0

#define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff

#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY			0x04dc
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00

/* MDIO access to the internal SerDes/PHY registers */
#define PCIE_RC_DL_MDIO_ADDR				0x1100
#define PCIE_RC_DL_MDIO_WR_DATA				0x1104
#define PCIE_RC_DL_MDIO_RD_DATA				0x1108

#define PCIE_MISC_MISC_CTRL				0x4008
#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK		0x300000
#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128		0x0
#define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000

/* Outbound (CPU -> PCIe) window registers; one pair of LO/HI per window */
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
#define PCIE_MEM_WIN0_LO(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
#define PCIE_MEM_WIN0_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)

#define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f

#define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
#define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038

#define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f

#define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048

#define PCIE_MISC_MSI_DATA_CONFIG			0x404c
#define PCIE_MISC_MSI_DATA_CONFIG_VAL			0xffe06540

#define PCIE_MISC_PCIE_CTRL				0x4064
#define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1

#define PCIE_MISC_PCIE_STATUS				0x4068
#define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
#define PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
#define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
#define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40

/* Base/limit (in MB granularity) of each outbound window */
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
#define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK		0xff
#define PCIE_MEM_WIN0_BASE_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
#define PCIE_MEM_WIN0_LIMIT_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)

#define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK		0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK			0x08000000

/* Internal MSI interrupt controller (status/clear/mask) */
#define PCIE_MSI_INTR2_STATUS				0x4500
#define PCIE_MSI_INTR2_CLR				0x4508
#define PCIE_MSI_INTR2_MASK_SET				0x4510
#define PCIE_MSI_INTR2_MASK_CLR				0x4514

#define PCIE_EXT_CFG_DATA				0x8000

#define PCIE_EXT_CFG_INDEX				0x9000
#define PCIE_EXT_BUSNUM_SHIFT				20
#define PCIE_EXT_SLOT_SHIFT				15
#define PCIE_EXT_FUNC_SHIFT				12

#define PCIE_RGR1_SW_INIT_1				0x9210
#define PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
#define PCIE_RGR1_SW_INIT_1_INIT_MASK			0x2

/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS		0x4
#define BRCM_INT_PCI_MSI_NR		32

/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
#define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL

/* MDIO registers */
#define MDIO_PORT0			0x0
#define MDIO_DATA_MASK			0x7fffffff
#define MDIO_PORT_MASK			0xf0000
#define MDIO_REGAD_MASK			0xffff
#define MDIO_CMD_MASK			0xfff00000
#define MDIO_CMD_READ			0x1
#define MDIO_CMD_WRITE			0x0
#define MDIO_DATA_DONE_MASK		0x80000000
/* Completion flags: a read is done when the DONE bit is SET, a write
 * when it is CLEAR.
 */
#define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
#define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
#define SSC_REGS_ADDR			0x1100
#define SET_ADDR_OFFSET			0x1f
#define SSC_CNTL_OFFSET			0x2
#define SSC_CNTL_OVRD_EN_MASK		0x8000
#define SSC_CNTL_OVRD_VAL_MASK		0x4000
#define SSC_STATUS_OFFSET		0x1
#define SSC_STATUS_SSC_MASK		0x400
#define SSC_STATUS_PLL_LOCK_MASK	0x800

/* State for the driver's internal MSI controller. */
struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;		/* same MMIO base as the RC */
	struct device_node	*np;
	struct irq_domain	*msi_domain;	/* outer PCI/MSI domain */
	struct irq_domain	*inner_domain;	/* hwirq <-> virq mapping */
	struct mutex		lock;		/* guards the alloc/free operations */
	u64			target_addr;	/* address endpoints write MSIs to */
	int			irq;		/* parent (chained) interrupt */
	/* used indicates which MSI interrupts have been alloc'd */
	unsigned long		used;
};

/* Internal PCIe Host Controller Information.*/
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;
	struct clk		*clk;		/* optional "sw_pcie" clock */
	struct device_node	*np;
	bool			ssc;		/* spread-spectrum clocking requested */
	int			gen;		/* forced link generation, 0 = default */
	u64			msi_target_addr;
	struct brcm_msi		*msi;
};

/*
 * This is to convert the size of the inbound "BAR" region to the
 * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
 */
static int brcm_pcie_encode_ibar_size(u64 size)
{
	int log2_in = ilog2(size);

	if (log2_in >= 12 && log2_in <= 15)
		/* Covers 4KB to 32KB (inclusive) */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 35)
		/* Covers 64KB to 32GB, (inclusive) */
		return log2_in - 15;
	/* Something is awry so disable */
	return 0;
}

/* Pack port/register-address/command into one MDIO address-register word. */
static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
	u32 pkt = 0;

	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);

	return pkt;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
{
	int tries;
	u32 data;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
		   base + PCIE_RC_DL_MDIO_ADDR);
	/* Read back to make sure the address write has posted */
	readl(base + PCIE_RC_DL_MDIO_ADDR);

	/* Poll (up to 10 * 10us) for the DONE bit in the read-data reg */
	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
		udelay(10);
		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
	}

	*val = FIELD_GET(MDIO_DATA_MASK, data);
	return MDIO_RD_DONE(data) ? 0 : -EIO;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
				u8 regad, u16 wrdata)
{
	int tries;
	u32 data;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
		   base + PCIE_RC_DL_MDIO_ADDR);
	/* Read back to make sure the address write has posted */
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);

	/* Poll (up to 10 * 10us) for the write to complete (DONE clears) */
	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
		udelay(10);
		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
	}

	return MDIO_WT_DONE(data) ? 0 : -EIO;
}

/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	/* Point the indirect PHY window at the SSC register block */
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	/* Read-modify-write the SSC control: enable override + value */
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	/* Give the PLL time to settle before checking status */
	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);

	/* Success only if SSC is active AND the PLL has locked */
	return ssc && pll ? 0 : -EIO;
}

/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
	/* The RC's PCIe capability block is memory-mapped at BRCM_PCIE_CAP_REGS */
	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	lnkctl2 = (lnkctl2 & ~0xf) | gen;
	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}

/*
 * Program outbound window @win to translate the CPU range
 * [cpu_addr, cpu_addr + size) to pcie_addr on the bus.  The base/limit
 * registers hold megabyte-granular values split across a low register
 * and two high-bits registers.
 */
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       unsigned int win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}

static struct irq_chip brcm_msi_irq_chip = {
	.name            = "BRCM STB PCIe MSI",
	.irq_ack         = irq_chip_ack_parent,
	.irq_mask        = pci_msi_mask_irq,
	.irq_unmask      = pci_msi_unmask_irq,
};

static struct msi_domain_info brcm_msi_domain_info = {
	/* Multi MSI is supported by the controller, but not by this driver */
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip	= &brcm_msi_irq_chip,
};

/*
 * Chained handler for the single parent interrupt: fan out every bit set
 * in PCIE_MSI_INTR2_STATUS to its mapped virq.
 */
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status, virq;
	struct brcm_msi *msi;
	struct device *dev;
	u32 bit;

	chained_irq_enter(chip, desc);
	msi = irq_desc_get_handler_data(desc);
	dev = msi->dev;

	status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
	for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
		virq = irq_find_mapping(msi->inner_domain, bit);
		if (virq)
			generic_handle_irq(virq);
		else
			dev_dbg(dev, "unexpected MSI\n");
	}

	chained_irq_exit(chip, desc);
}

/* Build the MSI message: target address + (fixed data pattern | hwirq). */
static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->target_addr);
	msg->address_hi = upper_32_bits(msi->target_addr);
	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
}

/* Affinity cannot be steered per-MSI; all MSIs funnel into one parent IRQ. */
static int brcm_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

/* Ack an MSI by clearing its bit in the INTR2 status register. */
static void brcm_msi_ack_irq(struct irq_data *data)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
}

static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_set_affinity	= brcm_msi_set_affinity,
	.irq_ack		= brcm_msi_ack_irq,
};

/*
 * Reserve one hwirq from the 32-bit 'used' bitmap; returns the hwirq
 * number or a negative errno when all are taken.
 */
static int brcm_msi_alloc(struct brcm_msi *msi)
{
	int hwirq;

	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
	mutex_unlock(&msi->lock);

	return hwirq;
}

/* Return a hwirq previously obtained from brcm_msi_alloc(). */
static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
	mutex_lock(&msi->lock);
	bitmap_release_region(&msi->used, hwirq, 0);
	mutex_unlock(&msi->lock);
}

/* Inner-domain .alloc: bind a freshly reserved hwirq to @virq. */
static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct brcm_msi *msi = domain->host_data;
	int hwirq;

	hwirq = brcm_msi_alloc(msi);

	if (hwirq < 0)
		return hwirq;

	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
			    &brcm_msi_bottom_irq_chip, domain->host_data,
			    handle_edge_irq, NULL, NULL);
	return 0;
}

/* Inner-domain .free: release the hwirq backing @virq. */
static void brcm_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);

	brcm_msi_free(msi, d->hwirq);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};

/*
 * Create the two-level domain hierarchy: a linear inner domain for the
 * 32 hw MSIs, topped by a PCI/MSI domain.  On failure the inner domain
 * is torn down before returning.
 */
static int brcm_allocate_domains(struct brcm_msi *msi)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
	struct device *dev = msi->dev;

	msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &brcm_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

/* Tear down the domains in the reverse order of creation. */
static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->inner_domain);
}

/* Undo brcm_pcie_enable_msi(); safe to call when MSI was never enabled. */
static void brcm_msi_remove(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi = pcie->msi;

	if (!msi)
		return;
	irq_set_chained_handler(msi->irq, NULL);
	irq_set_handler_data(msi->irq, NULL);
	brcm_free_domains(msi);
}

/* Program the MSI target address, data pattern, and unmask all 32 MSIs. */
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
	       msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}

/*
 * Set up the internal MSI controller: map the second DT interrupt as the
 * chained parent, build the irq domains, and program the hardware.
 * pcie->msi_target_addr must already be chosen (done in brcm_pcie_setup).
 */
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi;
	int irq, ret;
	struct device *dev = pcie->dev;

	irq = irq_of_parse_and_map(dev->of_node, 1);
	if (irq <= 0) {
		dev_err(dev, "cannot map MSI interrupt\n");
		return -ENODEV;
	}

	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	mutex_init(&msi->lock);
	msi->dev = dev;
	msi->base = pcie->base;
	msi->np = pcie->np;
	msi->target_addr = pcie->msi_target_addr;
	msi->irq = irq;

	ret = brcm_allocate_domains(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);

	brcm_msi_set_regs(msi);
	pcie->msi = msi;

	return 0;
}

/* The controller is capable of serving in both RC and EP roles */
553 static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie) 554 { 555 void __iomem *base = pcie->base; 556 u32 val = readl(base + PCIE_MISC_PCIE_STATUS); 557 558 return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val); 559 } 560 561 static bool brcm_pcie_link_up(struct brcm_pcie *pcie) 562 { 563 u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS); 564 u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val); 565 u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val); 566 567 return dla && plu; 568 } 569 570 /* Configuration space read/write support */ 571 static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg) 572 { 573 return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT) 574 | ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT) 575 | (busnr << PCIE_EXT_BUSNUM_SHIFT) 576 | (reg & ~3); 577 } 578 579 static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn, 580 int where) 581 { 582 struct brcm_pcie *pcie = bus->sysdata; 583 void __iomem *base = pcie->base; 584 int idx; 585 586 /* Accesses to the RC go right to the RC registers if slot==0 */ 587 if (pci_is_root_bus(bus)) 588 return PCI_SLOT(devfn) ? 
NULL : base + where; 589 590 /* For devices, write to the config space index register */ 591 idx = brcm_pcie_cfg_index(bus->number, devfn, 0); 592 writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); 593 return base + PCIE_EXT_CFG_DATA + where; 594 } 595 596 static struct pci_ops brcm_pcie_ops = { 597 .map_bus = brcm_pcie_map_conf, 598 .read = pci_generic_config_read, 599 .write = pci_generic_config_write, 600 }; 601 602 static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val) 603 { 604 u32 tmp; 605 606 tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1); 607 u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK); 608 writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1); 609 } 610 611 static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val) 612 { 613 u32 tmp; 614 615 tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1); 616 u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK); 617 writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1); 618 } 619 620 static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, 621 u64 *rc_bar2_size, 622 u64 *rc_bar2_offset) 623 { 624 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 625 struct device *dev = pcie->dev; 626 struct resource_entry *entry; 627 628 entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); 629 if (!entry) 630 return -ENODEV; 631 632 633 /* 634 * The controller expects the inbound window offset to be calculated as 635 * the difference between PCIe's address space and CPU's. The offset 636 * provided by the firmware is calculated the opposite way, so we 637 * negate it. 638 */ 639 *rc_bar2_offset = -entry->offset; 640 *rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start); 641 642 /* 643 * We validate the inbound memory view even though we should trust 644 * whatever the device-tree provides. This is because of an HW issue on 645 * early Raspberry Pi 4's revisions (bcm2711). 
It turns out its 646 * firmware has to dynamically edit dma-ranges due to a bug on the 647 * PCIe controller integration, which prohibits any access above the 648 * lower 3GB of memory. Given this, we decided to keep the dma-ranges 649 * in check, avoiding hard to debug device-tree related issues in the 650 * future: 651 * 652 * The PCIe host controller by design must set the inbound viewport to 653 * be a contiguous arrangement of all of the system's memory. In 654 * addition, its size mut be a power of two. To further complicate 655 * matters, the viewport must start on a pcie-address that is aligned 656 * on a multiple of its size. If a portion of the viewport does not 657 * represent system memory -- e.g. 3GB of memory requires a 4GB 658 * viewport -- we can map the outbound memory in or after 3GB and even 659 * though the viewport will overlap the outbound memory the controller 660 * will know to send outbound memory downstream and everything else 661 * upstream. 662 * 663 * For example: 664 * 665 * - The best-case scenario, memory up to 3GB, is to place the inbound 666 * region in the first 4GB of pcie-space, as some legacy devices can 667 * only address 32bits. We would also like to put the MSI under 4GB 668 * as well, since some devices require a 32bit MSI target address. 669 * 670 * - If the system memory is 4GB or larger we cannot start the inbound 671 * region at location 0 (since we have to allow some space for 672 * outbound memory @ 3GB). 
So instead it will start at the 1x 673 * multiple of its size 674 */ 675 if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) || 676 (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) { 677 dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n", 678 *rc_bar2_size, *rc_bar2_offset); 679 return -EINVAL; 680 } 681 682 return 0; 683 } 684 685 static int brcm_pcie_setup(struct brcm_pcie *pcie) 686 { 687 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 688 u64 rc_bar2_offset, rc_bar2_size; 689 void __iomem *base = pcie->base; 690 struct device *dev = pcie->dev; 691 struct resource_entry *entry; 692 unsigned int scb_size_val; 693 bool ssc_good = false; 694 struct resource *res; 695 int num_out_wins = 0; 696 u16 nlw, cls, lnksta; 697 int i, ret; 698 u32 tmp, aspm_support; 699 700 /* Reset the bridge */ 701 brcm_pcie_bridge_sw_init_set(pcie, 1); 702 brcm_pcie_perst_set(pcie, 1); 703 704 usleep_range(100, 200); 705 706 /* Take the bridge out of reset */ 707 brcm_pcie_bridge_sw_init_set(pcie, 0); 708 709 tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 710 tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; 711 writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 712 /* Wait for SerDes to be stable */ 713 usleep_range(100, 200); 714 715 /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */ 716 u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK); 717 u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK); 718 u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128, 719 PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK); 720 writel(tmp, base + PCIE_MISC_MISC_CTRL); 721 722 ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size, 723 &rc_bar2_offset); 724 if (ret) 725 return ret; 726 727 tmp = lower_32_bits(rc_bar2_offset); 728 u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size), 729 PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK); 730 writel(tmp, base + 
PCIE_MISC_RC_BAR2_CONFIG_LO); 731 writel(upper_32_bits(rc_bar2_offset), 732 base + PCIE_MISC_RC_BAR2_CONFIG_HI); 733 734 scb_size_val = rc_bar2_size ? 735 ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */ 736 tmp = readl(base + PCIE_MISC_MISC_CTRL); 737 u32p_replace_bits(&tmp, scb_size_val, 738 PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK); 739 writel(tmp, base + PCIE_MISC_MISC_CTRL); 740 741 /* 742 * We ideally want the MSI target address to be located in the 32bit 743 * addressable memory area. Some devices might depend on it. This is 744 * possible either when the inbound window is located above the lower 745 * 4GB or when the inbound area is smaller than 4GB (taking into 746 * account the rounding-up we're forced to perform). 747 */ 748 if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G) 749 pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB; 750 else 751 pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB; 752 753 /* disable the PCIe->GISB memory window (RC_BAR1) */ 754 tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO); 755 tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK; 756 writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO); 757 758 /* disable the PCIe->SCB memory window (RC_BAR3) */ 759 tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO); 760 tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK; 761 writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); 762 763 /* Mask all interrupts since we are not handling any yet */ 764 writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET); 765 766 /* clear any interrupts we find on boot */ 767 writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR); 768 769 if (pcie->gen) 770 brcm_pcie_set_gen(pcie, pcie->gen); 771 772 /* Unassert the fundamental reset */ 773 brcm_pcie_perst_set(pcie, 0); 774 775 /* 776 * Give the RC/EP time to wake up, before trying to configure RC. 777 * Intermittently check status for link-up, up to a total of 100ms. 
778 */ 779 for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) 780 msleep(5); 781 782 if (!brcm_pcie_link_up(pcie)) { 783 dev_err(dev, "link down\n"); 784 return -ENODEV; 785 } 786 787 if (!brcm_pcie_rc_mode(pcie)) { 788 dev_err(dev, "PCIe misconfigured; is in EP mode\n"); 789 return -EINVAL; 790 } 791 792 resource_list_for_each_entry(entry, &bridge->windows) { 793 res = entry->res; 794 795 if (resource_type(res) != IORESOURCE_MEM) 796 continue; 797 798 if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) { 799 dev_err(pcie->dev, "too many outbound wins\n"); 800 return -EINVAL; 801 } 802 803 brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start, 804 res->start - entry->offset, 805 resource_size(res)); 806 num_out_wins++; 807 } 808 809 /* Don't advertise L0s capability if 'aspm-no-l0s' */ 810 aspm_support = PCIE_LINK_STATE_L1; 811 if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) 812 aspm_support |= PCIE_LINK_STATE_L0S; 813 tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); 814 u32p_replace_bits(&tmp, aspm_support, 815 PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); 816 writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); 817 818 /* 819 * For config space accesses on the RC, show the right class for 820 * a PCIe-PCIe bridge (the default setting is to be EP mode). 821 */ 822 tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); 823 u32p_replace_bits(&tmp, 0x060400, 824 PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); 825 writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); 826 827 if (pcie->ssc) { 828 ret = brcm_pcie_set_ssc(pcie); 829 if (ret == 0) 830 ssc_good = true; 831 else 832 dev_err(dev, "failed attempt to enter ssc mode\n"); 833 } 834 835 lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA); 836 cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta); 837 nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); 838 dev_info(dev, "link up, %s x%u %s\n", 839 pci_speed_string(pcie_link_speed[cls]), nlw, 840 ssc_good ? 
"(SSC)" : "(!SSC)"); 841 842 /* PCIe->SCB endian mode for BAR */ 843 tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); 844 u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, 845 PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); 846 writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); 847 848 /* 849 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1 850 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. 851 */ 852 tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 853 tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; 854 writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 855 856 return 0; 857 } 858 859 /* L23 is a low-power PCIe link state */ 860 static void brcm_pcie_enter_l23(struct brcm_pcie *pcie) 861 { 862 void __iomem *base = pcie->base; 863 int l23, i; 864 u32 tmp; 865 866 /* Assert request for L23 */ 867 tmp = readl(base + PCIE_MISC_PCIE_CTRL); 868 u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK); 869 writel(tmp, base + PCIE_MISC_PCIE_CTRL); 870 871 /* Wait up to 36 msec for L23 */ 872 tmp = readl(base + PCIE_MISC_PCIE_STATUS); 873 l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp); 874 for (i = 0; i < 15 && !l23; i++) { 875 usleep_range(2000, 2400); 876 tmp = readl(base + PCIE_MISC_PCIE_STATUS); 877 l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, 878 tmp); 879 } 880 881 if (!l23) 882 dev_err(pcie->dev, "failed to enter low-power link state\n"); 883 } 884 885 static void brcm_pcie_turn_off(struct brcm_pcie *pcie) 886 { 887 void __iomem *base = pcie->base; 888 int tmp; 889 890 if (brcm_pcie_link_up(pcie)) 891 brcm_pcie_enter_l23(pcie); 892 /* Assert fundamental reset */ 893 brcm_pcie_perst_set(pcie, 1); 894 895 /* Deassert request for L23 in case it was asserted */ 896 tmp = readl(base + PCIE_MISC_PCIE_CTRL); 897 u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK); 898 writel(tmp, base + 
PCIE_MISC_PCIE_CTRL); 899 900 /* Turn off SerDes */ 901 tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 902 u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); 903 writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); 904 905 /* Shutdown PCIe bridge */ 906 brcm_pcie_bridge_sw_init_set(pcie, 1); 907 } 908 909 static void __brcm_pcie_remove(struct brcm_pcie *pcie) 910 { 911 brcm_msi_remove(pcie); 912 brcm_pcie_turn_off(pcie); 913 clk_disable_unprepare(pcie->clk); 914 } 915 916 static int brcm_pcie_remove(struct platform_device *pdev) 917 { 918 struct brcm_pcie *pcie = platform_get_drvdata(pdev); 919 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 920 921 pci_stop_root_bus(bridge->bus); 922 pci_remove_root_bus(bridge->bus); 923 __brcm_pcie_remove(pcie); 924 925 return 0; 926 } 927 928 static int brcm_pcie_probe(struct platform_device *pdev) 929 { 930 struct device_node *np = pdev->dev.of_node, *msi_np; 931 struct pci_host_bridge *bridge; 932 struct brcm_pcie *pcie; 933 int ret; 934 935 bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie)); 936 if (!bridge) 937 return -ENOMEM; 938 939 pcie = pci_host_bridge_priv(bridge); 940 pcie->dev = &pdev->dev; 941 pcie->np = np; 942 943 pcie->base = devm_platform_ioremap_resource(pdev, 0); 944 if (IS_ERR(pcie->base)) 945 return PTR_ERR(pcie->base); 946 947 pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie"); 948 if (IS_ERR(pcie->clk)) 949 return PTR_ERR(pcie->clk); 950 951 ret = of_pci_get_max_link_speed(np); 952 pcie->gen = (ret < 0) ? 
0 : ret; 953 954 pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); 955 956 ret = clk_prepare_enable(pcie->clk); 957 if (ret) { 958 dev_err(&pdev->dev, "could not enable clock\n"); 959 return ret; 960 } 961 962 ret = brcm_pcie_setup(pcie); 963 if (ret) 964 goto fail; 965 966 msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); 967 if (pci_msi_enabled() && msi_np == pcie->np) { 968 ret = brcm_pcie_enable_msi(pcie); 969 if (ret) { 970 dev_err(pcie->dev, "probe of internal MSI failed"); 971 goto fail; 972 } 973 } 974 975 bridge->ops = &brcm_pcie_ops; 976 bridge->sysdata = pcie; 977 978 platform_set_drvdata(pdev, pcie); 979 980 return pci_host_probe(bridge); 981 fail: 982 __brcm_pcie_remove(pcie); 983 return ret; 984 } 985 986 static const struct of_device_id brcm_pcie_match[] = { 987 { .compatible = "brcm,bcm2711-pcie" }, 988 {}, 989 }; 990 MODULE_DEVICE_TABLE(of, brcm_pcie_match); 991 992 static struct platform_driver brcm_pcie_driver = { 993 .probe = brcm_pcie_probe, 994 .remove = brcm_pcie_remove, 995 .driver = { 996 .name = "brcm-pcie", 997 .of_match_table = brcm_pcie_match, 998 }, 999 }; 1000 module_platform_driver(brcm_pcie_driver); 1001 1002 MODULE_LICENSE("GPL"); 1003 MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); 1004 MODULE_AUTHOR("Broadcom"); 1005