// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder_xcv"
#define DRV_VERSION	"1.0"

/* Register offsets */
#define XCV_RESET		0x00
#define   PORT_EN		BIT_ULL(63)
#define   CLK_RESET		BIT_ULL(15)
#define   DLL_RESET		BIT_ULL(11)
#define   COMP_EN		BIT_ULL(7)
#define   TX_PKT_RESET		BIT_ULL(3)
#define   TX_DATA_RESET		BIT_ULL(2)
#define   RX_PKT_RESET		BIT_ULL(1)
#define   RX_DATA_RESET		BIT_ULL(0)
#define XCV_DLL_CTL		0x10
#define   CLKRX_BYP		BIT_ULL(23)
#define   CLKTX_BYP		BIT_ULL(15)
#define XCV_COMP_CTL		0x20
#define   DRV_BYP		BIT_ULL(63)
#define XCV_CTL			0x30
#define XCV_INT			0x40
#define XCV_INT_W1S		0x48
#define XCV_INT_ENA_W1C		0x50
#define XCV_INT_ENA_W1S		0x58
#define XCV_INBND_STATUS	0x80
#define XCV_BATCH_CRD_RET	0x100

struct xcv {
	void __iomem		*reg_base;	/* Start of CSR register space */
	struct pci_dev		*pdev;
};

static struct xcv *xcv;

/* Supported devices */
static const struct pci_device_id xcv_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, xcv_id_table);

/* Bring the XCV block out of reset and enable the port */
void xcv_init_hw(void)
{
	u64 cfg;

	/* Take DLL out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~DLL_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/* Take clock tree out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
	/* Wait for DLL to lock */
	msleep(1);

	/* Configure DLL - enable or bypass
	 * TX no bypass, RX bypass
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
	cfg &= ~0xFF03;
	cfg |= CLKRX_BYP;
	writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);

	/* Enable compensation controller and force the
	 * write to be visible to HW by reading back.
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= COMP_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
	readq_relaxed(xcv->reg_base + XCV_RESET);
	/* Wait for compensation state machine to lock */
	msleep(10);

	/* Enable the XCV block */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= PORT_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/* Assert the clock reset bit again now that the port is enabled */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
}
EXPORT_SYMBOL(xcv_init_hw);

/* Program the XCV block for the current link state and speed */
void xcv_setup_link(bool link_up, int link_speed)
{
	u64 cfg;
	int speed = 2;	/* Default to 1000 Mbps */

	if (!xcv) {
		pr_err("XCV init not done, probe may have failed\n");
		return;
	}

	if (link_speed == 100)
		speed = 1;
	else if (link_speed == 10)
		speed = 0;

	if (link_up) {
		/* Set operating speed */
		cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
		cfg &= ~0x03;
		cfg |= speed;
		writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);

		/* Reset datapaths */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_DATA_RESET | RX_DATA_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Enable the packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_PKT_RESET | RX_PKT_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Return credits to RGX */
		writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
	} else {
		/* Disable packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
		readq_relaxed(xcv->reg_base + XCV_RESET);
	}
}
EXPORT_SYMBOL(xcv_setup_link);

static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;

	xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
	if (!xcv)
		return -ENOMEM;
	xcv->pdev = pdev;

	pci_set_drvdata(pdev, xcv);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_kfree;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!xcv->reg_base) {
		dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_kfree:
	devm_kfree(dev, xcv);
	xcv = NULL;
	return err;
}

static void xcv_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;

	if (xcv) {
		devm_kfree(dev, xcv);
		xcv = NULL;
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver xcv_driver = {
	.name = DRV_NAME,
	.id_table = xcv_id_table,
	.probe = xcv_probe,
	.remove = xcv_remove,
};

static int __init xcv_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&xcv_driver);
}

static void __exit xcv_cleanup_module(void)
{
	pci_unregister_driver(&xcv_driver);
}

module_init(xcv_init_module);
module_exit(xcv_cleanup_module);
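
/*
 * Usage sketch (illustrative only, not part of this driver): the two exported
 * helpers above are intended to be driven by the BGX/RGX MAC driver (see
 * thunder_bgx.h) when an RGMII interface is in use.  The call order below is
 * an assumption shown for clarity, not a copy of the actual call sites:
 *
 *	xcv_init_hw();			bring the XCV block out of reset
 *	xcv_setup_link(true, 1000);	on link up: set speed, start packet flow
 *	xcv_setup_link(false, 0);	on link down: stop packet flow
 *					(speed argument is ignored here)
 */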