// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>

#include "pcie-cadence.h"

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether Bit [23] is set or not inside the DESC0 register of the
	 * outbound PCIe descriptor, the PCI function number must be set into
	 * Bits [26:24] of DESC0 anyway.
	 *
	 * In Root Complex mode, the function number is always 0 but in Endpoint
	 * mode, the PCIe controller may support more than one function. This
	 * function number needs to be set properly into the outbound PCIe
	 * descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
	 * then the driver must provide the bus number in Bits [7:0] of DESC1
	 * and the device number in Bits [31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * However, when in Endpoint mode, we can clear Bit [23] of DESC0, so
	 * the PCIe controller will use the captured values for the bus and
	 * device numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		/*
		 * Use captured values for bus and device numbers but still
		 * need to set the function number.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
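
/*
 * Usage sketch (illustrative only, not part of the driver): a host-bridge
 * setup path could map a 1 MB non-prefetchable memory window through outbound
 * region 0 roughly like this; the CPU and PCI addresses below are made-up
 * values, not taken from any real platform:
 *
 *	cdns_pcie_set_outbound_region(pcie, 0, 0, 0, false,
 *				      0x40000000ULL, 0x00000000ULL, SZ_1M);
 *
 * With SZ_1M, the computed nbits is 20, i.e. the region spans 2^20 bytes.
 */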

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
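
/*
 * Usage sketch (illustrative only): an Endpoint-mode driver could dedicate an
 * outbound region to Normal Message TLPs, for example to signal INTx
 * interrupts. The region index, function number and CPU address below are
 * hypothetical:
 *
 *	cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 1,
 *						     msg_phys_addr);
 *
 * Note that, unlike cdns_pcie_set_outbound_region(), no PCI address is
 * programmed here: the PCI ADDR0/ADDR1 registers are cleared.
 */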

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}

void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}

int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}

int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names. PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

const struct dev_pm_ops cdns_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				      cdns_pcie_resume_noirq)
};
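
/*
 * Usage sketch (hypothetical glue driver, not defined here): a platform driver
 * built on top of this core can reuse the noirq suspend/resume handlers above
 * by pointing its driver model hooks at cdns_pcie_pm_ops:
 *
 *	static struct platform_driver my_cdns_pcie_driver = {
 *		.driver = {
 *			.name = "my-cdns-pcie",
 *			.pm = &cdns_pcie_pm_ops,
 *		},
 *		.probe = my_cdns_pcie_probe,
 *	};
 *
 * my_cdns_pcie_driver and my_cdns_pcie_probe are made-up names used only for
 * illustration.
 */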