// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe Gen4 host controller driver for NXP Layerscape SoCs
 *
 * Copyright 2019-2020 NXP
 *
 * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pcie-mobiveil.h"

/* LUT and PF control registers */
#define PCIE_LUT_OFF			0x80000
#define PCIE_PF_OFF			0xc0000
#define PCIE_PF_INT_STAT		0x18
#define PF_INT_STAT_PABRST		BIT(31)

#define PCIE_PF_DBG			0x7fc
#define PF_DBG_LTSSM_MASK		0x3f
#define PF_DBG_LTSSM_L0			0x2d /* L0 state */
#define PF_DBG_WE			BIT(31)
#define PF_DBG_PABR			BIT(27)

#define to_ls_pcie_g4(x)		platform_get_drvdata((x)->pdev)

struct ls_pcie_g4 {
	struct mobiveil_pcie pci;
	struct delayed_work dwork;
	int irq;
};

static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
{
	return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
}

static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
					 u32 off, u32 val)
{
	iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
}

static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
{
	return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
					u32 off, u32 val)
{
	iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}

/* Report the link as up once the LTSSM has reached the L0 state */
static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
{
	struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
	u32 state;

	state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	state = state & PF_DBG_LTSSM_MASK;

	if (state == PF_DBG_LTSSM_L0)
		return 1;

	return 0;
}

static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;

	mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
}

static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	/* Clear the interrupt status */
	mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);

	val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
	      PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
}

/*
 * Re-initialize the PAB after a reset event: wait for the controller reset
 * to assert and PAB activity to stop, set the PAB reset bit (guarded by the
 * write-enable bit), re-program the host bridge and wait for the link to
 * come back up.
 */
static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	struct device *dev = &mv_pci->pdev->dev;
	u32 val, act_stat;
	int to = 100;

	/* Poll for pab_csb_reset to set and PAB activity to clear */
	do {
		usleep_range(10, 15);
		val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
		act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
	} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
	if (to < 0) {
		dev_err(dev, "Poll PABRST&PABACT timeout\n");
		return -EIO;
	}

	/* clear PEX_RESET bit in PEX_PF0_DBG register */
	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_WE;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_PABR;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

	val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
	val &= ~PF_DBG_WE;
	ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);

	mobiveil_host_init(mv_pci, true);

	to = 100;
	while (!ls_pcie_g4_link_up(mv_pci) && to--)
		usleep_range(200, 250);
	if (to < 0) {
		dev_err(dev, "PCIe link training timeout\n");
		return -EIO;
	}

	return 0;
}

/*
 * Handle the PAB miscellaneous interrupts; on a reset event, mask further
 * interrupts and defer the recovery to the delayed work handler.
 */
static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
{
	struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u32 val;

	val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
	if (!val)
		return IRQ_NONE;

	if (val & PAB_INTP_RESET) {
		ls_pcie_g4_disable_interrupt(pcie);
		schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
	}

	mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);

	return IRQ_HANDLED;
}

static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
{
	struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
	struct platform_device *pdev = mv_pci->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	pcie->irq = platform_get_irq_byname(pdev, "intr");
	if (pcie->irq < 0) {
		dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq);
		return pcie->irq;
	}
	ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
			       IRQF_SHARED, pdev->name, pcie);
	if (ret) {
		dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Delayed work handler for a reset event: deassert the secondary bus reset,
 * re-initialize the hardware and, on success, re-enable the interrupts that
 * were masked in the ISR.
 */
static void ls_pcie_g4_reset(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	u16 ctrl;

	ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);

	if (ls_pcie_g4_reinit_hw(pcie))
		return;

	ls_pcie_g4_enable_interrupt(pcie);
}

static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
	.interrupt_init = ls_pcie_g4_interrupt_init,
};

static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
	.link_up = ls_pcie_g4_link_up,
};

static int __init ls_pcie_g4_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct mobiveil_pcie *mv_pci;
	struct ls_pcie_g4 *pcie;
	struct device_node *np = dev->of_node;
	int ret;

	if (!of_parse_phandle(np, "msi-parent", 0)) {
		dev_err(dev, "Failed to find msi-parent\n");
		return -EINVAL;
	}

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	mv_pci = &pcie->pci;

	mv_pci->pdev = pdev;
	mv_pci->ops = &ls_pcie_g4_pab_ops;
	mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
	mv_pci->rp.bridge = bridge;

	platform_set_drvdata(pdev, pcie);

	INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);

	ret = mobiveil_pcie_host_probe(mv_pci);
	if (ret) {
		dev_err(dev, "Failed to probe\n");
		return ret;
	}

	ls_pcie_g4_enable_interrupt(pcie);

	return 0;
}

static const struct of_device_id ls_pcie_g4_of_match[] = {
	{ .compatible = "fsl,lx2160a-pcie", },
	{ },
};

static struct platform_driver ls_pcie_g4_driver = {
	.driver = {
		.name = "layerscape-pcie-gen4",
		.of_match_table = ls_pcie_g4_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);