/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

/*
 * Local Management Registers
 */
#define CDNS_PCIE_LM_BASE	0x00100000

/* Vendor ID Register */
#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
        (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
        (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)

/* Root Port Requestor ID Register */
#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT	0
#define CDNS_PCIE_LM_RP_RID_(rid) \
        (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8
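
/*
 * Illustrative sketch (not part of the driver API): the vendor and subsystem
 * vendor IDs are typically programmed in one shot by combining the two fields
 * above and writing the result with cdns_pcie_writel(), defined later in this
 * header. The 0x17cd value below is only a placeholder.
 *
 *	u32 id = CDNS_PCIE_LM_ID_VENDOR(0x17cd) |
 *		 CDNS_PCIE_LM_ID_SUBSYS(0x17cd);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
 */
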
/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
        (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : \
                           CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
        (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
        (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
        (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : \
                           CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
        (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
        (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
        (GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
        (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
        (GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
        (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))

/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG		(CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
        (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK		GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
        (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
        (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK		GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
        (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS		0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE		BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS		0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7

#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
        (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
        (((aperture) - 2) << ((bar) * 8))
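
/*
 * Illustrative sketch (not part of the driver API): a Root Complex BAR is
 * configured by combining one of the control helpers above with an aperture
 * field derived from log2 of the window size. The 1 MB 64-bit prefetchable
 * window behind RP_BAR0 (enum defined below) is only an assumed example.
 *
 *	u32 value = LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(RP_BAR0) |
 *		    LM_RC_BAR_CFG_APERTURE(RP_BAR0, ilog2(SZ_1M));
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 */
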
/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))

#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET	0x200

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
        (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
        (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
        (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
        (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
        (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
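
/*
 * Illustrative sketch (not part of the driver API): for an outbound region
 * used to generate configuration accesses, ADDR0 packs the number of address
 * bits passed through to the PCIe side together with the target bus and
 * devfn, while ADDR1 holds the upper PCI address bits. Region 0 and the
 * 12-bit window below are assumed values.
 *
 *	u32 addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
 *		    CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
 *		    CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busnr);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), 0);
 */
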
/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
        (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK		GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK		GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
        (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
        (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
        ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
        (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
        (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
        (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
        (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
        (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
        (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP	(CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
        (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
         CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)

enum cdns_pcie_rp_bar {
        RP_BAR_UNDEFINED = -1,
        RP_BAR0,
        RP_BAR1,
        RP_NO_BAR
};

#define CDNS_PCIE_RP_MAX_IB	0x3
#define CDNS_PCIE_MAX_OB	32

struct cdns_pcie_rp_ib_bar {
        u64 size;
        bool free;
};

/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
        (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
        (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
        (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
        (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA			BIT(16)
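
/*
 * Illustrative sketch (not part of the driver API): in endpoint mode, a legacy
 * INTA assert message can be emitted by writing into an outbound region
 * configured for normal messages, at an offset built from the routing and
 * message code fields above (using the enums defined below). irq_cpu_addr is
 * assumed to point at such a region.
 *
 *	u32 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 *		     CDNS_PCIE_NORMAL_MSG_CODE(MSG_CODE_ASSERT_INTA) |
 *		     CDNS_PCIE_MSG_NO_DATA;
 *
 *	writel(0, irq_cpu_addr + offset);
 */
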
struct cdns_pcie;

enum cdns_pcie_msg_code {
        MSG_CODE_ASSERT_INTA = 0x20,
        MSG_CODE_ASSERT_INTB = 0x21,
        MSG_CODE_ASSERT_INTC = 0x22,
        MSG_CODE_ASSERT_INTD = 0x23,
        MSG_CODE_DEASSERT_INTA = 0x24,
        MSG_CODE_DEASSERT_INTB = 0x25,
        MSG_CODE_DEASSERT_INTC = 0x26,
        MSG_CODE_DEASSERT_INTD = 0x27,
};

enum cdns_pcie_msg_routing {
        /* Route to Root Complex */
        MSG_ROUTING_TO_RC,

        /* Use Address Routing */
        MSG_ROUTING_BY_ADDR,

        /* Use ID Routing */
        MSG_ROUTING_BY_ID,

        /* Route as Broadcast Message from Root Complex */
        MSG_ROUTING_BCAST,

        /* Local message; terminate at receiver (INTx messages) */
        MSG_ROUTING_LOCAL,

        /* Gather & route to Root Complex (PME_TO_Ack message) */
        MSG_ROUTING_GATHER,
};

struct cdns_pcie_ops {
        int (*start_link)(struct cdns_pcie *pcie);
        void (*stop_link)(struct cdns_pcie *pcie);
        bool (*link_up)(struct cdns_pcie *pcie);
        u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};

/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller device
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *       wrapper
 */
struct cdns_pcie {
        void __iomem *reg_base;
        struct resource *mem_res;
        struct device *dev;
        bool is_rc;
        int phy_count;
        struct phy **phy;
        struct device_link **link;
        const struct cdns_pcie_ops *ops;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: marks whether RP_BAR0, RP_BAR1 and RP_NO_BAR are still
 *                available (i.e. free to be used)
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_rc {
        struct cdns_pcie pcie;
        struct resource *cfg_res;
        void __iomem *cfg_base;
        u32 vendor_id;
        u32 device_id;
        bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
        unsigned int quirk_retrain_flag:1;
        unsigned int quirk_detect_quiet_flag:1;
};

/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf: Info about virtual functions attached to the physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
        struct cdns_pcie_epf *epf;
        struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
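
/*
 * Illustrative sketch (not part of the driver API): a platform glue driver can
 * provide a struct cdns_pcie_ops instance so the core can start, stop and
 * query the link through platform-specific registers. The names below are
 * hypothetical placeholders; a real implementation would read a platform or
 * PHY status register instead of unconditionally returning true.
 *
 *	static bool my_plat_pcie_link_up(struct cdns_pcie *pcie)
 *	{
 *		return true;
 *	}
 *
 *	static const struct cdns_pcie_ops my_plat_pcie_ops = {
 *		.link_up = my_plat_pcie_link_up,
 *	};
 */
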
/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *                 dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (legacy
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 * @lock: spin lock to disable interrupts while modifying PCIe controller
 *        register fields (RMW) accessible by both remote RC and EP to
 *        minimize time between read and write
 * @epf: Structure to hold info about endpoint function
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_ep {
        struct cdns_pcie pcie;
        u32 max_regions;
        unsigned long ob_region_map;
        phys_addr_t *ob_addr;
        phys_addr_t irq_phys_addr;
        void __iomem *irq_cpu_addr;
        u64 irq_pci_addr;
        u8 irq_pci_fn;
        u8 irq_pending;
        /* protect writing to PCI_STATUS while raising legacy interrupts */
        spinlock_t lock;
        struct cdns_pcie_epf *epf;
        unsigned int quirk_detect_quiet_flag:1;
};

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
        writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
        return readl(pcie->reg_base + reg);
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
        void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
        unsigned int offset = (unsigned long)addr & 0x3;
        u32 val = readl(aligned_addr);

        if (!IS_ALIGNED((uintptr_t)addr, size)) {
                pr_warn("Address %p and size %d are not aligned\n", addr, size);
                return 0;
        }

        if (size > 2)
                return val;

        return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
        void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
        unsigned int offset = (unsigned long)addr & 0x3;
        u32 mask;
        u32 val;

        if (!IS_ALIGNED((uintptr_t)addr, size)) {
                pr_warn("Address %p and size %d are not aligned\n", addr, size);
                return;
        }

        if (size > 2) {
                writel(value, addr);
                return;
        }

        mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
        val = readl(aligned_addr) & mask;
        val |= value << (offset * 8);
        writel(val, aligned_addr);
}

/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
                                       u32 reg, u8 value)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

        cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
                                       u32 reg, u16 value)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

        cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

        return cdns_pcie_read_sz(addr, 0x2);
}
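
/*
 * Illustrative sketch (not part of the driver API): the sub-word helpers above
 * turn 8-bit and 16-bit writes into a read-modify-write of the containing
 * 32-bit register, so e.g. the Root Port class code can be set with a single
 * 16-bit write into its configuration space. The class value is only an
 * example.
 *
 *	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
 */
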
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
                                          u32 reg, u8 value)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

        cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
                                          u32 reg, u16 value)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

        cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
                                          u32 reg, u32 value)
{
        writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
        void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

        return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
        return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
        if (pcie->ops->start_link)
                return pcie->ops->start_link(pcie);

        return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
        if (pcie->ops->stop_link)
                pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
        if (pcie->ops->link_up)
                return pcie->ops->link_up(pcie);

        return true;
}

#ifdef CONFIG_PCIE_CADENCE_HOST
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
                               int where);
#else
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
        return 0;
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus,
                                             unsigned int devfn, int where)
{
        return NULL;
}
#endif

#ifdef CONFIG_PCIE_CADENCE_EP
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
        return 0;
}
#endif

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
                                   u32 r, bool is_io,
                                   u64 cpu_addr, u64 pci_addr, size_t size);

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
                                                  u8 busnr, u8 fn,
                                                  u32 r, u64 cpu_addr);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */