// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
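	/*
	 * Worked example (illustrative): a 1 MiB BAR is already a power of
	 * two, so sz = SZ_1M and aperture = ilog2(SZ_1M) - 7 = 20 - 7 = 13;
	 * the controller then decodes 2^(13 + 7) = 1 MiB of inbound space.
	 */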

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr,
				      size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}
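
/*
 * Note: the MSI Multiple Message Capable/Enable bitfields encode a power of
 * two, so an mmc (or mme) value of 3 corresponds to 2^3 = 8 MSI vectors.
 */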
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
				 enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}
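
/*
 * INTx is emulated by sending ASSERT_INTx/DEASSERT_INTx message TLPs
 * through outbound region 0 (reserved for IRQ writes); the PCI_STATUS
 * interrupt bit is kept consistent with the pending mask under ep->lock.
 */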
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq().
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
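	/*
	 * Worked example (illustrative): with MME = 2 the host enabled
	 * 2^2 = 4 vectors, so data_mask = 0x3 and raising interrupt_num 3
	 * replaces the low bits of the host-programmed data value with 2.
	 */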

	/* Get the PCI address where the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address where the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}
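
/*
 * Unlike MSI, the MSI-X message address/data pair is not stored in config
 * space: it lives in the MSI-X table inside one of this function's own
 * BARs, so it is read back from local BAR memory before being replayed
 * through the IRQ outbound region.
 */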
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	epf = &ep->epf[fn];
	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int ret;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &cdns_pcie_epc_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.map_msi_irq	= cdns_pcie_ep_map_msi_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct resource *res;
	struct pci_epc *epc;
	int ret;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);
	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
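
/*
 * Usage sketch (illustrative only; the exact wiring lives in the platform
 * glue driver, not in this file): a glue driver is expected to embed
 * struct cdns_pcie_ep in its private data, set ep->pcie.dev, and call
 * cdns_pcie_ep_setup() from its probe routine, roughly:
 *
 *	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
 *	if (!ep)
 *		return -ENOMEM;
 *	ep->pcie.dev = dev;
 *	ret = cdns_pcie_ep_setup(ep);
 *	if (ret)
 *		return ret;
 */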