// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe endpoint driver for Renesas R-Car SoCs
 * Copyright (c) 2020 Renesas Electronics Europe GmbH
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "pcie-rcar.h"

#define RCAR_EPC_MAX_FUNCTIONS		1

/* Structure representing the PCIe interface */
struct rcar_pcie_endpoint {
	struct rcar_pcie	pcie;
	phys_addr_t		*ob_mapped_addr;
	struct pci_epc_mem_window *ob_window;
	u8			max_functions;
	unsigned int		bar_to_atu[MAX_NR_INBOUND_MAPS];
	unsigned long		*ib_window_map;
	u32			num_ib_windows;
	u32			num_ob_windows;
};

static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
{
	u32 val;

	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set endpoint mode */
	rcar_pci_write_reg(pcie, 0, PCIEMSR);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_NORMAL);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	val = rcar_pci_read_reg(pcie, EXPCAP(1));
	/* device supports fixed 128 bytes MPSS */
	val &= ~GENMASK(2, 0);
	rcar_pci_write_reg(pcie, val, EXPCAP(1));

	val = rcar_pci_read_reg(pcie, EXPCAP(2));
	/* read requests size 128 bytes */
	val &= ~GENMASK(14, 12);
	/* payload size 128 bytes */
	val &= ~GENMASK(7, 5);
	rcar_pci_write_reg(pcie, val, EXPCAP(2));

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* flush modifications */
	wmb();
}
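
/*
 * Usage sketch (illustrative only, not part of this driver): an endpoint
 * function driver such as pci-epf-test reaches the controller ops below
 * through the generic EPC API, roughly:
 *
 *	pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, epf->header);
 *	pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[BAR_0]);
 *	pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, epf->msi_interrupts);
 *	pci_epc_start(epc);
 *
 * "epf" here stands for the function driver's struct pci_epf instance.
 */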

static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
				   phys_addr_t addr)
{
	int i;

	for (i = 0; i < ep->num_ob_windows; i++)
		if (ep->ob_window[i].phys_base == addr)
			return i;

	return -EINVAL;
}

static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
					   struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	char outbound_name[10];
	struct resource *res;
	unsigned int i = 0;

	ep->num_ob_windows = 0;
	for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
		sprintf(outbound_name, "memory%u", i);
		res = platform_get_resource_byname(pdev,
						   IORESOURCE_MEM,
						   outbound_name);
		if (!res) {
			dev_err(pcie->dev, "missing outbound window %u\n", i);
			return -EINVAL;
		}
		if (!devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res),
					     outbound_name)) {
			dev_err(pcie->dev, "Cannot request memory region %s.\n",
				outbound_name);
			return -EIO;
		}

		ep->ob_window[i].phys_base = res->start;
		ep->ob_window[i].size = resource_size(res);
		/* controller doesn't support multiple allocation
		 * from same window, so set page_size to window size
		 */
		ep->ob_window[i].page_size = resource_size(res);
	}
	ep->num_ob_windows = i;

	return 0;
}

static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
				  struct platform_device *pdev)
{
	struct rcar_pcie *pcie = &ep->pcie;
	struct pci_epc_mem_window *window;
	struct device *dev = pcie->dev;
	struct resource res;
	int err;

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;
	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
				     sizeof(*window), GFP_KERNEL);
	if (!ep->ob_window)
		return -ENOMEM;

	rcar_pcie_parse_outbound_ranges(ep, pdev);

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->max_functions);
	if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
		ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;

	return 0;
}

static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	if (!fn)
		val = hdr->vendorid;
	else
		val = rcar_pci_read_reg(pcie, IDSETR0);
	val |= hdr->deviceid << 16;
	rcar_pci_write_reg(pcie, val, IDSETR0);

	val = hdr->revid;
	val |= hdr->progif_code << 8;
	val |= hdr->subclass_code << 16;
	val |= hdr->baseclass_code << 24;
	rcar_pci_write_reg(pcie, val, IDSETR1);

	if (!fn)
		val = hdr->subsys_vendor_id;
	else
		val = rcar_pci_read_reg(pcie, SUBIDSETR);
	val |= hdr->subsys_id << 16;
	rcar_pci_write_reg(pcie, val, SUBIDSETR);

	if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
		return -EINVAL;
	val = rcar_pci_read_reg(pcie, PCICONF(15));
	val |= (hdr->interrupt_pin << 8);
	rcar_pci_write_reg(pcie, val, PCICONF(15));

	return 0;
}
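
/*
 * BAR setup sketch (illustrative only, not part of the upstream code):
 * rcar_pcie_epc_features below advertises only 64-bit BARs 0, 2 and 4 with
 * fixed sizes, so a function driver would normally query the features
 * before programming a BAR, e.g.:
 *
 *	const struct pci_epc_features *features;
 *
 *	features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 *	if (features && features->bar_fixed_size[BAR_0])
 *		epf->bar[BAR_0].size = features->bar_fixed_size[BAR_0];
 *	pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[BAR_0]);
 */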

static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				struct pci_epf_bar *epf_bar)
{
	int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	u64 size = 1ULL << fls64(epf_bar->size - 1);
	dma_addr_t cpu_addr = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	struct rcar_pcie *pcie = &ep->pcie;
	u32 mask;
	int idx;
	int err;

	idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (idx >= ep->num_ib_windows) {
		dev_err(pcie->dev, "no free inbound window\n");
		return -EINVAL;
	}

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		flags |= IO_SPACE;

	ep->bar_to_atu[bar] = idx;
	/* use 64-bit BARs */
	set_bit(idx, ep->ib_window_map);
	set_bit(idx + 1, ep->ib_window_map);

	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(size, alignment);
	}

	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	rcar_pcie_set_inbound(pcie, cpu_addr,
			      0x0, mask | flags, idx, false);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err) {
		dev_err(pcie->dev, "phy not ready\n");
		return -EINVAL;
	}

	return 0;
}

static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);

	clear_bit(atu_index, ep->ib_window_map);
	clear_bit(atu_index + 1, ep->ib_window_map);
}

static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				u8 interrupts)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
	rcar_pci_write_reg(pcie, flags, MSICAP(fn));

	return 0;
}

static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 flags;

	flags = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(flags & MSICAP0_MSIE))
		return -EINVAL;

	return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}

static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	struct resource_entry win;
	struct resource res;
	int window;
	int err;

	/* check if we have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err) {
		dev_err(pcie->dev, "link not up\n");
		return err;
	}

	window = rcar_pcie_ep_get_window(ep, addr);
	if (window < 0) {
		dev_err(pcie->dev, "failed to get corresponding window\n");
		return -EINVAL;
	}

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	res.start = pci_addr;
	res.end = pci_addr + size - 1;
	res.flags = IORESOURCE_MEM;
	win.res = &res;

	rcar_pcie_set_outbound(pcie, window, &win);

	ep->ob_mapped_addr[window] = addr;

	return 0;
}

static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct resource_entry win;
	struct resource res;
	int idx;

	for (idx = 0; idx < ep->num_ob_windows; idx++)
		if (ep->ob_mapped_addr[idx] == addr)
			break;

	if (idx >= ep->num_ob_windows)
		return;

	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	win.res = &res;
	rcar_pcie_set_outbound(&ep->pcie, idx, &win);

	ep->ob_mapped_addr[idx] = 0;
}
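
/*
 * Outbound mapping sketch (illustrative only): a function driver maps host
 * memory through one of the fixed "memoryN" windows roughly as follows:
 *
 *	void __iomem *buf;
 *	phys_addr_t phys;
 *
 *	buf = pci_epc_mem_alloc_addr(epc, &phys, map_size);
 *	pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
 *			 host_pci_addr, map_size);
 *	memcpy_toio(buf, src, map_size);
 *	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
 *	pci_epc_mem_free_addr(epc, phys, buf, map_size);
 *
 * "map_size", "host_pci_addr" and "src" are placeholders for this sketch.
 * Because page_size equals the window size here, each window can back only
 * one mapping at a time.
 */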

static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
				    u8 fn, u8 intx)
{
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	if ((val & PCI_MSI_FLAGS_ENABLE)) {
		dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCICONF(1));
	if ((val & INTDIS)) {
		dev_err(pcie->dev, "INTx message transmission is disabled\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	if ((val & ASTINTX)) {
		dev_err(pcie->dev, "INTx is already asserted\n");
		return -EINVAL;
	}

	val |= ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);
	usleep_range(1000, 1001);
	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	val &= ~ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);

	return 0;
}

static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
				   u8 fn, u8 interrupt_num)
{
	u16 msi_count;
	u32 val;

	/* Check MSI enable bit */
	val = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(val & MSICAP0_MSIE))
		return -EINVAL;

	/* Get MSI numbers from MME */
	msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
	msi_count = 1 << msi_count;

	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);

	return 0;
}

static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return rcar_pcie_ep_assert_intx(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);

	default:
		return -EINVAL;
	}
}
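
/*
 * Interrupt sketch (illustrative only): a function driver signals the host
 * through pci_epc_raise_irq(), which lands in rcar_pcie_ep_raise_irq():
 *
 *	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *			  PCI_EPC_IRQ_LEGACY, 0);
 *	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *			  PCI_EPC_IRQ_MSI, 1);
 *
 * On this controller INTx can only be asserted while MSI is disabled, and
 * MSI vector numbers run from 1 up to the count enabled in the MSI
 * capability.
 */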

static int rcar_pcie_ep_start(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);

	return 0;
}

static void rcar_pcie_ep_stop(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
}

static const struct pci_epc_features rcar_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
	.reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
	.bar_fixed_size[0] = 128,
	.bar_fixed_size[2] = 256,
	.bar_fixed_size[4] = 256,
};

static const struct pci_epc_features*
rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rcar_pcie_epc_features;
}

static const struct pci_epc_ops rcar_pcie_epc_ops = {
	.write_header	= rcar_pcie_ep_write_header,
	.set_bar	= rcar_pcie_ep_set_bar,
	.clear_bar	= rcar_pcie_ep_clear_bar,
	.set_msi	= rcar_pcie_ep_set_msi,
	.get_msi	= rcar_pcie_ep_get_msi,
	.map_addr	= rcar_pcie_ep_map_addr,
	.unmap_addr	= rcar_pcie_ep_unmap_addr,
	.raise_irq	= rcar_pcie_ep_raise_irq,
	.start		= rcar_pcie_ep_start,
	.stop		= rcar_pcie_ep_stop,
	.get_features	= rcar_pcie_ep_get_features,
};

static const struct of_device_id rcar_pcie_ep_of_match[] = {
	{ .compatible = "renesas,r8a774c0-pcie-ep", },
	{ .compatible = "renesas,rcar-gen3-pcie-ep" },
	{ },
};

static int rcar_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_endpoint *ep;
	struct rcar_pcie *pcie;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->dev = dev;

	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_ep_get_pdata(ep, pdev);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
	ep->ib_window_map =
			devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
				     sizeof(long), GFP_KERNEL);
	if (!ep->ib_window_map) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for inbound map\n");
		goto err_pm_put;
	}

	ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
					  sizeof(*ep->ob_mapped_addr),
					  GFP_KERNEL);
	if (!ep->ob_mapped_addr) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
		goto err_pm_put;
	}

	epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		err = PTR_ERR(epc);
		goto err_pm_put;
	}

	epc->max_functions = ep->max_functions;
	epc_set_drvdata(epc, ep);

	rcar_pcie_ep_hw_init(pcie);

	err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
	if (err < 0) {
		dev_err(dev, "failed to initialize the epc memory space\n");
		goto err_pm_put;
	}

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);

	return err;
}

static struct platform_driver rcar_pcie_ep_driver = {
	.driver = {
		.name = "rcar-pcie-ep",
		.of_match_table = rcar_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_ep_probe,
};
builtin_platform_driver(rcar_pcie_ep_driver);