1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell Octeon EP (EndPoint) Ethernet Driver 3 * 4 * Copyright (C) 2020 Marvell. 5 * 6 */ 7 8 #include <linux/pci.h> 9 #include <linux/netdevice.h> 10 #include <linux/etherdevice.h> 11 12 #include "octep_config.h" 13 #include "octep_main.h" 14 #include "octep_regs_cn9k_pf.h" 15 16 #define CTRL_MBOX_MAX_PF 128 17 #define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) 18 19 #define FW_HB_INTERVAL_IN_SECS 1 20 #define FW_HB_MISS_COUNT 10 21 22 /* Names of Hardware non-queue generic interrupts */ 23 static char *cn93_non_ioq_msix_names[] = { 24 "epf_ire_rint", 25 "epf_ore_rint", 26 "epf_vfire_rint0", 27 "epf_vfire_rint1", 28 "epf_vfore_rint0", 29 "epf_vfore_rint1", 30 "epf_mbox_rint0", 31 "epf_mbox_rint1", 32 "epf_oei_rint", 33 "epf_dma_rint", 34 "epf_dma_vf_rint0", 35 "epf_dma_vf_rint1", 36 "epf_pp_vf_rint0", 37 "epf_pp_vf_rint1", 38 "epf_misc_rint", 39 "epf_rsvd", 40 }; 41 42 /* Dump useful hardware CSRs for debug purpose */ 43 static void cn93_dump_regs(struct octep_device *oct, int qno) 44 { 45 struct device *dev = &oct->pdev->dev; 46 47 dev_info(dev, "IQ-%d register dump\n", qno); 48 dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", 49 qno, CN93_SDP_R_IN_INSTR_DBELL(qno), 50 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); 51 dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", 52 qno, CN93_SDP_R_IN_CONTROL(qno), 53 octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); 54 dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", 55 qno, CN93_SDP_R_IN_ENABLE(qno), 56 octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); 57 dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", 58 qno, CN93_SDP_R_IN_INSTR_BADDR(qno), 59 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); 60 dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", 61 qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), 62 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); 63 dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", 64 qno, CN93_SDP_R_IN_CNTS(qno), 65 octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno))); 66 dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", 67 qno, CN93_SDP_R_IN_INT_LEVELS(qno), 68 octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); 69 dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", 70 qno, CN93_SDP_R_IN_PKT_CNT(qno), 71 octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); 72 dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", 73 qno, CN93_SDP_R_IN_BYTE_CNT(qno), 74 octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); 75 76 dev_info(dev, "OQ-%d register dump\n", qno); 77 dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", 78 qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), 79 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); 80 dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", 81 qno, CN93_SDP_R_OUT_CONTROL(qno), 82 octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); 83 dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", 84 qno, CN93_SDP_R_OUT_ENABLE(qno), 85 octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); 86 dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", 87 qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), 88 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); 89 dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", 90 qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), 91 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); 92 dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", 93 qno, CN93_SDP_R_OUT_CNTS(qno), 94 octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); 95 dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", 96 qno, CN93_SDP_R_OUT_INT_LEVELS(qno), 97 octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); 98 dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", 99 qno, CN93_SDP_R_OUT_PKT_CNT(qno), 100 octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); 101 dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", 102 qno, CN93_SDP_R_OUT_BYTE_CNT(qno), 103 octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno))); 104 dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", 105 qno, CN93_SDP_R_ERR_TYPE(qno), 106 octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); 107 } 108 109 /* Reset Hardware Tx queue */ 110 static int cn93_reset_iq(struct octep_device *oct, int q_no) 111 { 112 struct octep_config *conf = oct->conf; 113 u64 val = 0ULL; 114 115 dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); 116 117 /* Get absolute queue number */ 118 q_no += conf->pf_ring_cfg.srn; 119 120 /* Disable the Tx/Instruction Ring */ 121 octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); 122 123 /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ 124 octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); 125 octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); 126 octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); 127 octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); 128 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); 129 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); 130 131 val = 0xFFFFFFFF; 132 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); 133 134 return 0; 135 } 136 137 /* Reset Hardware Rx queue */ 138 static void cn93_reset_oq(struct octep_device *oct, int q_no) 139 { 140 u64 val = 0ULL; 141 142 q_no += CFG_GET_PORTS_PF_SRN(oct->conf); 143 144 /* Disable Output (Rx) Ring */ 145 octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); 146 147 /* Clear count CSRs */ 148 val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); 149 octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); 150 151 octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); 152 octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); 153 } 154 155 /* Reset all hardware Tx/Rx queues */ 156 static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) 157 { 158 struct pci_dev *pdev = oct->pdev; 159 int q; 160 161 dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); 162 163 for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 164 cn93_reset_iq(oct, q); 165 cn93_reset_oq(oct, q); 166 } 167 } 168 169 /* Initialize windowed addresses to access some hardware registers */ 170 static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) 171 { 172 u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; 173 174 oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); 175 oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); 176 oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); 177 oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); 178 } 179 180 /* Configure Hardware mapping: inform hardware which rings belong to PF. */ 181 static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) 182 { 183 struct octep_config *conf = oct->conf; 184 struct pci_dev *pdev = oct->pdev; 185 u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); 186 int q; 187 188 for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { 189 u64 regval = 0; 190 191 if (oct->pcie_port) 192 regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; 193 194 octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); 195 196 regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); 197 dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", 198 CN93_SDP_EPVF_RING(pf_srn + q), regval); 199 } 200 } 201 202 /* Initialize configuration limits and initial active config 93xx PF. */ 203 static void octep_init_config_cn93_pf(struct octep_device *oct) 204 { 205 struct octep_config *conf = oct->conf; 206 struct pci_dev *pdev = oct->pdev; 207 u8 link = 0; 208 u64 val; 209 int pos; 210 211 /* Read ring configuration: 212 * PF ring count, number of VFs and rings per VF supported 213 */ 214 val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); 215 conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); 216 conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; 217 conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); 218 conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; 219 conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); 220 221 val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); 222 conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); 223 conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); 224 conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; 225 dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", 226 conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, 227 conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); 228 229 conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; 230 conf->iq.instr_type = OCTEP_64BYTE_INSTR; 231 conf->iq.pkind = 0; 232 conf->iq.db_min = OCTEP_DB_MIN; 233 conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; 234 235 conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; 236 conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; 237 conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; 238 conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; 239 conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; 240 241 conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; 242 conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; 243 conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; 244 245 pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV); 246 if (pos) { 247 pci_read_config_byte(oct->pdev, 248 pos + PCI_SRIOV_FUNC_LINK, 249 &link); 250 link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); 251 } 252 conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + 253 (0x400000ull * 7) + 254 (link * CTRL_MBOX_SZ); 255 256 conf->hb_interval = FW_HB_INTERVAL_IN_SECS; 257 conf->max_hb_miss_cnt = FW_HB_MISS_COUNT; 258 259 } 260 261 /* Setup registers for a hardware Tx Queue */ 262 static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) 263 { 264 struct octep_iq *iq = oct->iq[iq_no]; 265 u32 reset_instr_cnt; 266 u64 reg_val; 267 268 iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 269 reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); 270 271 /* wait for IDLE to set to 1 */ 272 if (!(reg_val & CN93_R_IN_CTL_IDLE)) { 273 do { 274 reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); 275 } while (!(reg_val & CN93_R_IN_CTL_IDLE)); 276 } 277 278 reg_val |= CN93_R_IN_CTL_RDSIZE; 279 reg_val |= CN93_R_IN_CTL_IS_64B; 280 reg_val |= CN93_R_IN_CTL_ESR; 281 octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); 282 283 /* Write the start of the input queue's ring and its size */ 284 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), 285 iq->desc_ring_dma); 286 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), 287 iq->max_count); 288 289 /* Remember the doorbell & instruction count register addr 290 * for this queue 291 */ 292 iq->doorbell_reg = oct->mmio[0].hw_addr + 293 CN93_SDP_R_IN_INSTR_DBELL(iq_no); 294 iq->inst_cnt_reg = oct->mmio[0].hw_addr + 295 CN93_SDP_R_IN_CNTS(iq_no); 296 iq->intr_lvl_reg = oct->mmio[0].hw_addr + 297 CN93_SDP_R_IN_INT_LEVELS(iq_no); 298 299 /* Store the current instruction counter (used in flush_iq calculation) */ 300 reset_instr_cnt = readl(iq->inst_cnt_reg); 301 writel(reset_instr_cnt, iq->inst_cnt_reg); 302 303 /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ 304 reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; 305 octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 306 } 307 308 /* Setup registers for a hardware Rx Queue */ 309 static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) 310 { 311 u64 reg_val; 312 u64 oq_ctl = 0ULL; 313 u32 time_threshold = 0; 314 struct octep_oq *oq = oct->oq[oq_no]; 315 316 oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 317 reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 318 319 /* wait for IDLE to set to 1 */ 320 if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { 321 do { 322 reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 323 } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); 324 } 325 326 reg_val &= ~(CN93_R_OUT_CTL_IMODE); 327 reg_val &= ~(CN93_R_OUT_CTL_ROR_P); 328 reg_val &= ~(CN93_R_OUT_CTL_NSR_P); 329 reg_val &= ~(CN93_R_OUT_CTL_ROR_I); 330 reg_val &= ~(CN93_R_OUT_CTL_NSR_I); 331 reg_val &= ~(CN93_R_OUT_CTL_ES_I); 332 reg_val &= ~(CN93_R_OUT_CTL_ROR_D); 333 reg_val &= ~(CN93_R_OUT_CTL_NSR_D); 334 reg_val &= ~(CN93_R_OUT_CTL_ES_D); 335 reg_val |= (CN93_R_OUT_CTL_ES_P); 336 337 octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); 338 octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), 339 oq->desc_ring_dma); 340 octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), 341 oq->max_count); 342 343 oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); 344 oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) 345 oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) 346 octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); 347 348 /* Get the mapped address of the pkt_sent and pkts_credit regs */ 349 oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); 350 oq->pkts_credit_reg = oct->mmio[0].hw_addr + 351 CN93_SDP_R_OUT_SLIST_DBELL(oq_no); 352 353 time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); 354 reg_val = ((u64)time_threshold << 32) | 355 CFG_GET_OQ_INTR_PKT(oct->conf); 356 octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 357 } 358 359 /* Setup registers for a PF mailbox */ 360 static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) 361 { 362 struct octep_mbox *mbox = oct->mbox[q_no]; 363 364 mbox->q_no = q_no; 365 366 /* PF mbox interrupt reg */ 367 mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); 368 369 /* PF to VF DATA reg. PF writes into this reg */ 370 mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); 371 372 /* VF to PF DATA reg. PF reads from this reg */ 373 mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); 374 } 375 376 /* Process non-ioq interrupts required to keep pf interface running. 377 * OEI_RINT is needed for control mailbox 378 */ 379 static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) 380 { 381 bool handled = false; 382 u64 reg0; 383 384 /* Check for OEI INTR */ 385 reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); 386 if (reg0) { 387 dev_info(&oct->pdev->dev, 388 "Received OEI_RINT intr: 0x%llx\n", 389 reg0); 390 octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0); 391 if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) 392 queue_work(octep_wq, &oct->ctrl_mbox_task); 393 else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) 394 atomic_set(&oct->hb_miss_cnt, 0); 395 396 handled = true; 397 } 398 399 return handled; 400 } 401 402 /* Interrupts handler for all non-queue generic interrupts. */ 403 static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) 404 { 405 struct octep_device *oct = (struct octep_device *)dev; 406 struct pci_dev *pdev = oct->pdev; 407 u64 reg_val = 0; 408 int i = 0; 409 410 /* Check for IRERR INTR */ 411 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); 412 if (reg_val) { 413 dev_info(&pdev->dev, 414 "received IRERR_RINT intr: 0x%llx\n", reg_val); 415 octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); 416 417 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 418 reg_val = octep_read_csr64(oct, 419 CN93_SDP_R_ERR_TYPE(i)); 420 if (reg_val) { 421 dev_info(&pdev->dev, 422 "Received err type on IQ-%d: 0x%llx\n", 423 i, reg_val); 424 octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), 425 reg_val); 426 } 427 } 428 goto irq_handled; 429 } 430 431 /* Check for ORERR INTR */ 432 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); 433 if (reg_val) { 434 dev_info(&pdev->dev, 435 "Received ORERR_RINT intr: 0x%llx\n", reg_val); 436 octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); 437 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { 438 reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); 439 if (reg_val) { 440 dev_info(&pdev->dev, 441 "Received err type on OQ-%d: 0x%llx\n", 442 i, reg_val); 443 octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), 444 reg_val); 445 } 446 } 447 448 goto irq_handled; 449 } 450 451 /* Check for VFIRE INTR */ 452 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); 453 if (reg_val) { 454 dev_info(&pdev->dev, 455 "Received VFIRE_RINT intr: 0x%llx\n", reg_val); 456 octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); 457 goto irq_handled; 458 } 459 460 /* Check for VFORE INTR */ 461 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); 462 if (reg_val) { 463 dev_info(&pdev->dev, 464 "Received VFORE_RINT intr: 0x%llx\n", reg_val); 465 octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); 466 goto irq_handled; 467 } 468 469 /* Check for MBOX INTR and OEI INTR */ 470 if (octep_poll_non_ioq_interrupts_cn93_pf(oct)) 471 goto irq_handled; 472 473 /* Check for DMA INTR */ 474 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); 475 if (reg_val) { 476 octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); 477 goto irq_handled; 478 } 479 480 /* Check for DMA VF INTR */ 481 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); 482 if (reg_val) { 483 dev_info(&pdev->dev, 484 "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); 485 octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); 486 goto irq_handled; 487 } 488 489 /* Check for PPVF INTR */ 490 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); 491 if (reg_val) { 492 dev_info(&pdev->dev, 493 "Received PP_VF_RINT intr: 0x%llx\n", reg_val); 494 octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); 495 goto irq_handled; 496 } 497 498 /* Check for MISC INTR */ 499 reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); 500 if (reg_val) { 501 dev_info(&pdev->dev, 502 "Received MISC_RINT intr: 0x%llx\n", reg_val); 503 octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); 504 goto irq_handled; 505 } 506 507 dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); 508 irq_handled: 509 return IRQ_HANDLED; 510 } 511 512 /* Tx/Rx queue interrupt handler */ 513 static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) 514 { 515 struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; 516 struct octep_oq *oq = vector->oq; 517 518 napi_schedule_irqoff(oq->napi); 519 return IRQ_HANDLED; 520 } 521 522 /* soft reset of 93xx */ 523 static int octep_soft_reset_cn93_pf(struct octep_device *oct) 524 { 525 dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); 526 527 octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); 528 529 /* Set core domain reset bit */ 530 OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); 531 /* Wait for 100ms as Octeon resets. */ 532 mdelay(100); 533 /* clear core domain reset bit */ 534 OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); 535 536 return 0; 537 } 538 539 /* Re-initialize Octeon hardware registers */ 540 static void octep_reinit_regs_cn93_pf(struct octep_device *oct) 541 { 542 u32 i; 543 544 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 545 oct->hw_ops.setup_iq_regs(oct, i); 546 547 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 548 oct->hw_ops.setup_oq_regs(oct, i); 549 550 oct->hw_ops.enable_interrupts(oct); 551 oct->hw_ops.enable_io_queues(oct); 552 553 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) 554 writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); 555 } 556 557 /* Enable all interrupts */ 558 static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) 559 { 560 u64 intr_mask = 0ULL; 561 int srn, num_rings, i; 562 563 srn = CFG_GET_PORTS_PF_SRN(oct->conf); 564 num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 565 566 for (i = 0; i < num_rings; i++) 567 intr_mask |= (0x1ULL << (srn + i)); 568 569 octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); 570 octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); 571 octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); 572 octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); 573 octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); 574 } 575 576 /* Disable all interrupts */ 577 static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) 578 { 579 u64 intr_mask = 0ULL; 580 int srn, num_rings, i; 581 582 srn = CFG_GET_PORTS_PF_SRN(oct->conf); 583 num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 584 585 for (i = 0; i < num_rings; i++) 586 intr_mask |= (0x1ULL << (srn + i)); 587 588 octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); 589 octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); 590 octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); 591 octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); 592 octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); 593 } 594 595 /* Get new Octeon Read Index: index of descriptor that Octeon reads next. */ 596 static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) 597 { 598 u32 pkt_in_done = readl(iq->inst_cnt_reg); 599 u32 last_done, new_idx; 600 601 last_done = pkt_in_done - iq->pkt_in_done; 602 iq->pkt_in_done = pkt_in_done; 603 604 new_idx = (iq->octep_read_index + last_done) % iq->max_count; 605 606 return new_idx; 607 } 608 609 /* Enable a hardware Tx Queue */ 610 static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) 611 { 612 u64 loop = HZ; 613 u64 reg_val; 614 615 iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 616 617 octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); 618 619 while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && 620 loop--) { 621 schedule_timeout_interruptible(1); 622 } 623 624 reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); 625 reg_val |= (0x1ULL << 62); 626 octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); 627 628 reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); 629 reg_val |= 0x1ULL; 630 octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); 631 } 632 633 /* Enable a hardware Rx Queue */ 634 static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) 635 { 636 u64 reg_val = 0ULL; 637 638 oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 639 640 reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); 641 reg_val |= (0x1ULL << 62); 642 octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); 643 644 octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); 645 646 reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); 647 reg_val |= 0x1ULL; 648 octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); 649 } 650 651 /* Enable all hardware Tx/Rx Queues assined to PF */ 652 static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) 653 { 654 u8 q; 655 656 for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 657 octep_enable_iq_cn93_pf(oct, q); 658 octep_enable_oq_cn93_pf(oct, q); 659 } 660 } 661 662 /* Disable a hardware Tx Queue assined to PF */ 663 static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) 664 { 665 u64 reg_val = 0ULL; 666 667 iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 668 669 reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); 670 reg_val &= ~0x1ULL; 671 octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); 672 } 673 674 /* Disable a hardware Rx Queue assined to PF */ 675 static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) 676 { 677 u64 reg_val = 0ULL; 678 679 oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); 680 reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); 681 reg_val &= ~0x1ULL; 682 octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); 683 } 684 685 /* Disable all hardware Tx/Rx Queues assined to PF */ 686 static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) 687 { 688 int q = 0; 689 690 for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { 691 octep_disable_iq_cn93_pf(oct, q); 692 octep_disable_oq_cn93_pf(oct, q); 693 } 694 } 695 696 /* Dump hardware registers (including Tx/Rx queues) for debugging. */ 697 static void octep_dump_registers_cn93_pf(struct octep_device *oct) 698 { 699 u8 srn, num_rings, q; 700 701 srn = CFG_GET_PORTS_PF_SRN(oct->conf); 702 num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); 703 704 for (q = srn; q < srn + num_rings; q++) 705 cn93_dump_regs(oct, q); 706 } 707 708 /** 709 * octep_device_setup_cn93_pf() - Setup Octeon device. 710 * 711 * @oct: Octeon device private data structure. 712 * 713 * - initialize hardware operations. 714 * - get target side pcie port number for the device. 715 * - setup window access to hardware registers. 716 * - set initial configuration and max limits. 717 * - setup hardware mapping of rings to the PF device. 718 */ 719 void octep_device_setup_cn93_pf(struct octep_device *oct) 720 { 721 oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; 722 oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; 723 oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; 724 725 oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; 726 oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; 727 oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; 728 oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; 729 730 oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; 731 oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; 732 oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf; 733 734 oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; 735 736 oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; 737 oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; 738 oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; 739 740 oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; 741 oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; 742 oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; 743 oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; 744 745 oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; 746 747 octep_setup_pci_window_regs_cn93_pf(oct); 748 749 oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; 750 dev_info(&oct->pdev->dev, 751 "Octeon device using PCIE Port %d\n", oct->pcie_port); 752 753 octep_init_config_cn93_pf(oct); 754 octep_configure_ring_mapping_cn93_pf(oct); 755 } 756