/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"

/* Program the CN68XX DPI (DMA engine) registers: enable DMA control,
 * set per-engine FIFO buffer sizes, disable instruction-queue service
 * for all six engines, and finally enable the DPI block via DPI_CTL.
 */
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
	u32 i;
	/* Per-engine FIFO buffer sizes for DMA engines 0-5. */
	u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };

	lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
	dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));

	for (i = 0; i < 6; i++) {
		/* Prevent service of instruction queue for all DMA engines
		 * Engine 5 will remain 0. Engines 0 - 4 will be setup by
		 * core.
		 */
		lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
		lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
		dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
			lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
	}

	/* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
	 * separately.
	 */

	lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
	dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_CTL));
}

/* Soft-reset the CN68XX: run the common CN6XXX reset sequence, then
 * reprogram the DPI registers afterwards.  Always returns 0.
 * NOTE(review): DPI reprogramming presumably restores state lost by the
 * reset — confirm against the CN68XX HRM.
 */
static int lio_cn68xx_soft_reset(struct octeon_device *oct)
{
	lio_cn6xxx_soft_reset(oct);
	lio_cn68xx_set_dpi_regs(oct);

	return 0;
}

/* Configure CN68XX-specific packet control: write the configured max
 * output-queue count into the NUMP field (bits 23:16) of SLI_TX_PIPE,
 * and enable or disable per-port backpressure (low 4 bits of
 * SLI_PKT_CTL) based on the SLI_BP config flag.
 */
static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u64 pktctl, tx_pipe, max_oqs;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 68XX specific */
	max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx));
	tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
	tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
	tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
	octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);

	if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

/* One-time device register setup for the CN68XX: PCIe MPS/MRRS, error
 * reporting, global input/output queue registers (shared CN6XXX code
 * plus the 68XX packet-control tweaks above), and the SLI window
 * control timeout.  Always returns 0.
 */
static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn68xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid host hang
	 * when reads invalid register
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}

/* Workaround: set the two low bits (M_VEND1_DRP and M_VEND0_DRP) in
 * the PCIe filter-mask config register via a read-modify-write of
 * PCI config space.
 */
static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
{
	u32 val = 0;

	/* Set M_VEND1_DRP and M_VEND0_DRP bits */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
	val |= 0x3;
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}

/* Detect the card variant: returns nonzero (210NV) when the masked
 * MIO QLM4 configuration reads as zero, else zero (410NV).
 */
static int lio_is_210nv(struct octeon_device *oct)
{
	u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);

	return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
}

/* Top-level CN68XX setup: map BAR0/BAR1, initialize the droq interrupt
 * lock, populate the chip-specific function dispatch table (mostly
 * shared CN6XXX handlers plus the 68XX soft-reset and device-regs
 * routines), detect the card variant (210NV vs 410NV), fetch its
 * config, record the coprocessor clock rate, and apply the vendor
 * message workaround.
 *
 * Returns 0 on success, 1 on failure (BAR map or missing config);
 * BARs are unmapped on the failure paths.
 */
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u16 card_type = LIO_410NV;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	/* Determine variant of card */
	if (lio_is_210nv(oct))
		card_type = LIO_210NV;

	cn68xx->conf = (struct octeon_config *)
		oct_get_config_info(oct, card_type);
	if (!cn68xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
			__func__,
			(card_type == LIO_410NV) ? LIO_410NV_NAME :
			LIO_210NV_NAME);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	lio_cn68xx_vendor_message_fix(oct);

	return 0;
}