/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"

/* Program the DPI DMA block: disable instruction-queue service for every
 * engine, set the per-engine FIFO sizes, then enable the DPI block.
 */
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
	u32 i;
	u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };

	lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
	dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));

	for (i = 0; i < 6; i++) {
		/* Prevent service of the instruction queue for all DMA
		 * engines. Engine 5 will remain 0; engines 0-4 will be set
		 * up by the core.
		 */
		lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
		lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
		dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
			lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
	}

	/* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
	 * separately.
	 */

	lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
	dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_CTL));
}

/* Reset through the common CN6XXX path, then restore the DPI registers. */
static int lio_cn68xx_soft_reset(struct octeon_device *oct)
{
	lio_cn6xxx_soft_reset(oct);
	lio_cn68xx_set_dpi_regs(oct);

	return 0;
}

static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u64 pktctl, tx_pipe, max_oqs;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 68XX specific */
	max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx));
	tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
	tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
	tx_pipe |= max_oqs << 16;         /* put max_oqs in NUMP field */
	octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);

	if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

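/* Program CN68XX-specific device registers: PCIe MPS/MRRS, PCIe error
 * reporting, the global input/output queue registers and the SLI window
 * control timeout. Builds on the common CN6XXX helpers.
 */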
static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn68xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* The default error timeout value should be 0x200000 to avoid a
	 * host hang when an invalid register is read.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}

static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
{
	u32 val = 0;

	/* Set M_VEND1_DRP and M_VEND0_DRP bits */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
	val |= 0x3;
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}

/* Identify the 210NV variant from the QLM4 configuration; any other value
 * means the card is treated as a 410NV.
 */
static int lio_is_210nv(struct octeon_device *oct)
{
	u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);

	return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
}

/* Map the PCI BARs, install the CN6XXX/CN68XX function pointers and load
 * the configuration that matches the detected card variant.
 */
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u16 card_type = LIO_410NV;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	/* Determine the variant of the card. */
	if (lio_is_210nv(oct))
		card_type = LIO_210NV;

	cn68xx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, card_type);
	if (!cn68xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
			__func__,
			(card_type == LIO_410NV) ? LIO_410NV_NAME :
						   LIO_210NV_NAME);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	lio_cn68xx_vendor_message_fix(oct);

	return 0;
}