/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"

int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* Make sure that the reset is written before starting the timer. */
	mmiowb();

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}

void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
			val & 0x000c0000);
	}

	val |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting...\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}
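/* Field encoding used by the two helpers below (per the PCIe spec):
 * in the Device Control register, Max_Payload_Size occupies bits 7:5
 * and Max_Read_Request_Size bits 14:12, and a field value of k encodes
 * 2^(k + 7) bytes. So PCIE_MRRS_512B (2) written to bits 14:12
 * requests 512-byte reads, and an MPS field of 0 means 128 bytes.
 */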
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5); /* Turn off any MPS bits */
		val |= (mps << 5);  /* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
		val |= (mrrs << 12); /* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
	 * for SLI.
	 */
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}

u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* Core clock per us divided by oq ticks would be fractional.
	 * To avoid that we use the method below.
	 */

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}
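/* Worked example for lio_cn6xxx_get_oq_ticks(), with an assumed (not
 * guaranteed) 600 MHz SLI clock: oqticks_per_us starts at 600;
 * 600 * 1000 = 600000 cycles per ms; 600000 / 1024 = 585 ticks per ms;
 * with time_intr_in_us = 100, 585 * 100 / 1000 = 58 ticks. Scaling by
 * 1000 before dividing by 1024 is what keeps the integer division from
 * truncating 600 / 1024 to zero.
 */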
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings. */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}

static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 66XX SPECIFIC */
	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* Select Info Ptr for length & data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);

	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct,
					(u32)CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}
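/* A note on the 0x5555555555555555ULL constants used above: these SLI
 * registers pack one 2-bit field per ring, 32 rings to a 64-bit
 * register. Multiplying a 2-bit value by 0x5555555555555555ULL (binary
 * 01 repeated) replicates that value into every field without carries;
 * e.g. pcie_port 1 becomes 0x5555555555555555, port 0 becomes 0.
 */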
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host
	 * hang when an invalid register is read.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
	return 0;
}

void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}

void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 (droq->buffer_size | (OCT_RH_SIZE << 16)));

	/* Get the mapped address of the pkts_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	return 0;
}
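/* Note on the io_qmask fields used in the queue enable/disable paths:
 * each is a 64-bit bitmap with bit k set when queue k is in use, so
 * io_qmask.iq == 0x3 covers instruction queues 0 and 1, while
 * io_qmask.iq64B flags which of those queues carry 64-byte (rather
 * than 32-byte) instructions.
 */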
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	int i;
	u32 mask, loop = HZ;
	u32 d32;

	/* Clear the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are in reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Clear the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are in reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & (1ULL << i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	/* Clear any pending packet count and timer interrupts. */
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of PCI_BAR1_INDEXx store bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
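/* Worked example for lio_cn6xxx_bar1_idx_setup(), with illustrative
 * values: for core_addr 0x10000000, (0x10000000 >> 22) = 0x40, shifted
 * left by 4 gives 0x400, which is OR'd with PCI_BAR1_MASK before being
 * written. Each BAR1 index entry therefore maps one 4 MB (2^22 byte)
 * window of Octeon core memory.
 */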
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The instr cnt reg is a 32-bit counter that can roll over. We
	 * noted the counter's initial value at init time in
	 * reset_instr_cnt.
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the new index with the IQ size gives us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}
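/* Worked example for the rollover handling above, with illustrative
 * numbers: if reset_instr_cnt = 0xFFFFFFF0 and the register now reads
 * new_idx = 0x00000010, the counter has wrapped, so the else branch
 * yields 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20 instructions
 * fetched since init; the modulo then folds that into a ring offset.
 */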
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
				 u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupts */
	writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
				  u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);

	/* make sure interrupts are really disabled */
	mmiowb();
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}

static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	int oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & (1ULL << oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= (1ULL << oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;
				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				/* Ensure that the enable register is
				 * written.
				 */
				mmiowb();

				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)	/* reset PKT_CNT register: 66xx */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}

irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed.
	 * Also check for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}

void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}
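/* The setup routine below hooks the chip-specific helpers in this file
 * into oct->fn_list; the common liquidio code then dispatches through
 * those pointers, e.g. oct->fn_list.setup_device_regs(oct) followed by
 * oct->fn_list.enable_io_queues(oct), rather than calling the CN6XXX
 * functions directly.
 */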
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	/* lio_cn6xxx_coprocessor_clock() returns MHz; store Hz. */
	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}

int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
	    !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}