/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_pf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

#define RESET_NOTDONE 0
#define RESET_DONE 1

/* Change the value of SLI Packet Input Jabber Register to allow
 * VXLAN TSO packets which can be 64424 bytes, exceeding the
 * MAX_GSO_SIZE we supplied to the kernel
 */
#define CN23XX_INPUT_JABBER 64600
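/* Note: 64600 leaves 64600 - 64424 = 176 bytes of slack above the largest
 * expected VXLAN TSO frame, presumably to absorb encapsulation and L2
 * header overhead.
 */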

void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
{
	int i = 0;
	u32 regval = 0;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	/*In cn23xx_soft_reset*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
		"CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
		lio_pci_readq(oct, CN23XX_RST_SOFT_RST));

	/*In cn23xx_set_dpi_regs*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
		lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));

	for (i = 0; i < 6; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_DPI_DMA_ENG_ENB", i,
			CN23XX_DPI_DMA_ENG_ENB(i),
			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_DPI_DMA_ENG_BUF", i,
			CN23XX_DPI_DMA_ENG_BUF(i),
			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
	}

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
		CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));

	/*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_CONFIG_PCIE_DEVCTL",
		CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));

	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
		"CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
		CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
		lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));

	/*In cn23xx_specific_regs_setup */
	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
		"CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
		CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
		CVM_CAST64(octeon_read_csr64(
			oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
		(u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));

	/*In cn23xx_setup_global_mac_regs*/
	for (i = 0; i < CN23XX_MAX_MACS; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_PKT_MAC_RINFO64", i,
			CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
			CVM_CAST64(octeon_read_csr64
				(oct, CN23XX_SLI_PKT_MAC_RINFO64
					(i, oct->pf_num))));
	}

	/*In cn23xx_setup_global_input_regs*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_PKT_CONTROL64", i,
			CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
			CVM_CAST64(octeon_read_csr64
				(oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
	}

	/*In cn23xx_setup_global_output_regs*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKT_CONTROL", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
			CVM_CAST64(octeon_read_csr(
				oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
	}

	/*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"cn23xx->intr_enb_reg64",
		CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
		CVM_CAST64(readq(cn23xx->intr_enb_reg64)));

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"cn23xx->intr_sum_reg64",
		CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
		CVM_CAST64(readq(cn23xx->intr_sum_reg64)));

	/*In cn23xx_setup_iq_regs*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_BASE_ADDR64", i,
			CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_SIZE", i,
			CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
			CVM_CAST64(octeon_read_csr
				(oct, CN23XX_SLI_IQ_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_DOORBELL", i,
			CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_DOORBELL(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_INSTR_COUNT64", i,
			CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
	}

	/*In cn23xx_setup_oq_regs*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_BASE_ADDR64", i,
			CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_SIZE", i,
			CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
			CVM_CAST64(octeon_read_csr
				(oct, CN23XX_SLI_OQ_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
			CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
			CVM_CAST64(octeon_read_csr(
				oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKTS_SENT", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKTS_CREDIT", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
	}

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_PKT_TIME_INT",
		CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_PKT_CNT_INT",
		CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
}

static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
		oct->octeon_id);

	octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
			oct->octeon_id);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
		oct->octeon_id);

	/* restore the reset value */
	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}

static void cn23xx_enable_error_reporting(struct octeon_device *oct)
{
	u32 regval;
	u32 uncorrectable_err_mask, corrtable_err_status;

	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask = 0;
		corrtable_err_status = 0;
		pci_read_config_dword(oct->pci_dev,
				      CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
				      &uncorrectable_err_mask);
		pci_read_config_dword(oct->pci_dev,
				      CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
				      &corrtable_err_status);
		dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
			"\tdev_ctl_status_reg = 0x%08x\n"
			"\tuncorrectable_error_mask_reg = 0x%08x\n"
			"\tcorrectable_error_status_reg = 0x%08x\n",
			regval, uncorrectable_err_mask,
			corrtable_err_status);
	}

	regval |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
		oct->octeon_id);
	pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
}

static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref.clock MULTIPLIER
	 * for SLI.
	 */

	/* TBD: get the info in Hand-shake */
	return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
}
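
/* Worked example (illustrative value, not read from hardware): if
 * RST_BOOT[PNR_MUL] reads back as 20, the SLI/coprocessor clock is
 * 20 * 50 = 1000 MHz, i.e. 1000 clock ticks per microsecond.
 */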

u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);

	oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}
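
/* Example with the hypothetical 1000 MHz coprocessor clock from above and
 * time_intr_in_us = 100: 1000 * 1000 / 1024 = 976 oq ticks per millisecond,
 * then 976 * 100 / 1000 = 97 ticks programmed for a 100 us interval.
 */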

static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
{
	u16 mac_no = oct->pcie_port;
	u16 pf_num = oct->pf_num;
	u64 reg_val;
	u64 temp;

	/* programming SRN and TRS for each MAC(0..3) */

	dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
		__func__, mac_no);
	/* By default, map all 64 IOQs to a single MAC */

	reg_val =
	    octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));

	if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
		/* setting SRN <6:0> */
		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
	} else {
		/* setting SRN <6:0> */
		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
	}

	/* setting TRS <23:16> */
	reg_val = reg_val |
		  (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
	/* setting RPVF <39:32> */
	temp = oct->sriov_info.rings_per_vf & 0xff;
	reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);

	/* setting NVFS <55:48> */
	temp = oct->sriov_info.max_vfs & 0xff;
	reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);

	/* write these settings to MAC register */
	octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			   reg_val);

	dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
		mac_no, pf_num, (u64)octeon_read_csr64
		(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
}
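
/* Summary of the SLI_PKT_MAC()_PF()_RINFO layout assembled above, inferred
 * from the fields written into it:
 *   SRN  <6:0>   - starting ring number for this PF
 *   TRS  <23:16> - total rings (oct->sriov_info.trs)
 *   RPVF <39:32> - rings per VF (oct->sriov_info.rings_per_vf)
 *   NVFS <55:48> - number of VFs (oct->sriov_info.max_vfs)
 */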

static int cn23xx_reset_io_queues(struct octeon_device *oct)
{
	int ret_val = 0;
	u64 d64;
	u32 q_no, srn, ern;
	u32 loop = 1000;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/* As per HRM reg description, s/w can't write 0 to ENB. */
	/* To turn a queue off, the RST bit must be set instead. */

	/* Reset the Enable bit for all the 64 IQs. */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
	}

	/* wait until the RST bit is clear or the RST and quiet bits are set */
	for (q_no = srn; q_no < ern; q_no++) {
		u64 reg_val = octeon_read_csr64(oct,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop--) {
			WRITE_ONCE(reg_val, octeon_read_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		}
		if (!loop) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				q_no);
			return -1;
		}
		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
			   ~CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(reg_val));

		WRITE_ONCE(reg_val, octeon_read_csr64(
			   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset failed for qno: %u\n",
				q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}

static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	struct octeon_instr_queue *iq;
	u64 intr_threshold, reg_val;
	u32 q_no, ern, srn;
	u64 pf_num;
	u64 vf_num;

	pf_num = oct->pf_num;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (cn23xx_reset_io_queues(oct))
		return -1;

	/** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
	 * for all queues. Only the PF can set these bits.
	 * bits 29:30 indicate the MAC num.
	 * bits 32:47 indicate the PVF num.
	 */
	for (q_no = 0; q_no < ern; q_no++) {
		reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;

		/* for VF assigned queues. */
		if (q_no < oct->sriov_info.pf_srn) {
			vf_num = q_no / oct->sriov_info.rings_per_vf;
			vf_num += 1; /* VF1, VF2,........ */
		} else {
			vf_num = 0;
		}

		reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
		reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);
	}

	/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
	 * pf queues
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		void __iomem *inst_cnt_reg;

		iq = oct->instr_queue[q_no];
		if (iq)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

		reg_val =
		    octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val |= CN23XX_PKT_INPUT_CTL_MASK;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);

		/* Set WMARK level for triggering PI_INT */
		/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
				 CN23XX_PKT_IN_DONE_WMARK_MASK;

		writeq((readq(inst_cnt_reg) &
			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
		       inst_cnt_reg);
	}
	return 0;
}

static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no, ern, srn;
	u64 time_threshold;

	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
	} else {
		/** Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
	}

	for (q_no = srn; q_no < ern; q_no++) {
		reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* clear IPTR */
		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

		/* set DPTR */
		reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList
		 * reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data
		 * reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);

		/* These interrupts are enabled in the
		 * oct->fn_list.enable_interrupt() routine, which is called
		 * after IOQ init.
		 * Set up interrupt packet and time thresholds
		 * for all the OQs
		 */
		time_threshold = cn23xx_pf_get_oq_ticks(
		    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

		octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				   (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
				    (time_threshold << 32)));
	}

	/** Setting the water mark level for pko back pressure **/
	writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);

	/** Disable putting OQs in reset when a ring has no doorbells;
	 * enabling this would cause head-of-line blocking
	 */
	/* Do it only for pass1.1. and pass1.2 */
	if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
	    (oct->rev_id == OCTEON_CN23XX_REV_1_1))
		writeq(readq((u8 *)oct->mmio[0].hw_addr +
			     CN23XX_SLI_GBL_CONTROL) | 0x2,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);

	/** Enable channel-level backpressure */
	if (oct->pf_num)
		writeq(0xffffffffffffffffULL,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
	else
		writeq(0xffffffffffffffffULL,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
}
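
/* Note on the SLI_PKT()_INT_LEVELS write above: the packet-count threshold
 * (CFG_GET_OQ_INTR_PKT) occupies the low 32 bits and the time threshold
 * returned by cn23xx_pf_get_oq_ticks() lands in the upper 32 bits, as seen
 * from the (time_threshold << 32) packing.
 */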

static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
	cn23xx_enable_error_reporting(oct);

	/* program the MAC(0..3)_RINFO before setting up input/output regs */
	cn23xx_setup_global_mac_regs(oct);

	if (cn23xx_pf_setup_global_input_regs(oct))
		return -1;

	cn23xx_pf_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host hang
	 * when an invalid register is read
	 */
	octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
			   CN23XX_SLI_WINDOW_CTL_DEFAULT);

	/* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
	octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
	return 0;
}

static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	u64 pkt_in_done;

	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = readq(iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
		       iq->inst_cnt_reg);
	} else {
		/* Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		writeq(pkt_in_done, iq->inst_cnt_reg);
	}

	iq->reset_instr_cnt = 0;
}

static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 reg_val;
	struct octeon_droq *droq = oct->droq[oq_no];
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u64 time_threshold;
	u64 cnt_threshold;

	oq_no += oct->sriov_info.pf_srn;

	octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);

	if (!oct->msix_on) {
		/* Enable this output queue to generate Packet Timer Interrupt
		 */
		reg_val =
		    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				 reg_val);

		/* Enable this output queue to generate Packet Count Interrupt
		 */
		reg_val =
		    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				 reg_val);
	} else {
		time_threshold = cn23xx_pf_get_oq_ticks(
		    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
		cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);

		octeon_write_csr64(
		    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
		    ((time_threshold << 32 | cnt_threshold)));
	}
}

static void cn23xx_pf_mbox_thread(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
	struct octeon_device *oct = mbox->oct_dev;
	u64 mbox_int_val, val64;
	u32 q_no, i;

	if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
		/* read and clear by writing 1 */
		mbox_int_val = readq(mbox->mbox_int_reg);
		writeq(mbox_int_val, mbox->mbox_int_reg);

		for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
			q_no = i * oct->sriov_info.rings_per_vf;

			val64 = readq(oct->mbox[q_no]->mbox_write_reg);

			if (val64 && (val64 != OCTEON_PFVFACK)) {
				if (octeon_mbox_read(oct->mbox[q_no]))
					octeon_mbox_process_message(
					    oct->mbox[q_no]);
			}
		}

		schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
	} else {
		octeon_mbox_process_message(mbox);
	}
}

static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
{
	struct octeon_mbox *mbox = NULL;
	u16 mac_no = oct->pcie_port;
	u16 pf_num = oct->pf_num;
	u32 q_no, i;

	if (!oct->sriov_info.max_vfs)
		return 0;

	for (i = 0; i < oct->sriov_info.max_vfs; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;

		mbox = vzalloc(sizeof(*mbox));
		if (!mbox)
			goto free_mbox;

		spin_lock_init(&mbox->lock);

		mbox->oct_dev = oct;

		mbox->q_no = q_no;

		mbox->state = OCTEON_MBOX_STATE_IDLE;

		/* PF mbox interrupt reg */
		mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
				     CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);

		/* PF writes into SIG0 reg */
		mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);

		/* PF reads from SIG1 reg */
		mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
				      CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);

		/* Mail Box Thread creation */
		INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
				  cn23xx_pf_mbox_thread);
		mbox->mbox_poll_wk.ctxptr = (void *)mbox;

		oct->mbox[q_no] = mbox;

		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
	}

	if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
		schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
				      msecs_to_jiffies(0));

	return 0;

free_mbox:
	while (i) {
		i--;
		vfree(oct->mbox[i]);
	}

	return 1;
}

static int cn23xx_free_pf_mbox(struct octeon_device *oct)
{
	u32 q_no, i;

	if (!oct->sriov_info.max_vfs)
		return 0;

	for (i = 0; i < oct->sriov_info.max_vfs; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;
		cancel_delayed_work_sync(
				&oct->mbox[q_no]->mbox_poll_wk.work);
		vfree(oct->mbox[q_no]);
	}

	return 0;
}

static int cn23xx_enable_io_queues(struct octeon_device *oct)
{
	u64 reg_val;
	u32 srn, ern, q_no;
	u32 loop = 1000;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/* IOQs are in reset by default in PEM2 mode,
			 * clearing reset bit
			 */
			reg_val = octeon_read_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 CN23XX_PKT_INPUT_CTL_QUIET) &&
				       --loop) {
					reg_val = octeon_read_csr64(
					    oct,
					    CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				}
				if (!loop) {
					dev_err(&oct->pci_dev->dev,
						"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						q_no);
					return -1;
				}
				reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
				octeon_write_csr64(
				    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				    reg_val);

				reg_val = octeon_read_csr64(
				    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
					dev_err(&oct->pci_dev->dev,
						"clearing the reset failed for qno: %u\n",
						q_no);
					return -1;
				}
			}
			reg_val = octeon_read_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
			    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = srn; q_no < ern; q_no++) {
		u32 reg_val;
		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = octeon_read_csr(
			    oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					 reg_val);
		}
	}
	return 0;
}

static void cn23xx_disable_io_queues(struct octeon_device *oct)
{
	int q_no, loop;
	u64 d64;
	u32 d32;
	u32 srn, ern;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	/*** Disable Input Queues. ***/
	for (q_no = srn; q_no < ern; q_no++) {
		loop = HZ;

		/* start the Reset for a particular ring */
		WRITE_ONCE(d64, octeon_read_csr64(
			   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		WRITE_ONCE(d64, READ_ONCE(d64) &
					(~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
		WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(d64));

		/* Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		WRITE_ONCE(d64, octeon_read_csr64(
					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
			WRITE_ONCE(d64, octeon_read_csr64(
					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
			schedule_timeout_uninterruptible(1);
		}

		/* Reset the doorbell register for this Input Queue. */
		octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
		while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
		       loop--) {
			schedule_timeout_uninterruptible(1);
		}
	}

	/*** Disable Output Queues. ***/
	for (q_no = srn; q_no < ern; q_no++) {
		loop = HZ;

		/* Wait until hardware indicates that the particular IQ
		 * is out of reset. Note that SLI_PKT_RING_RST is
		 * common for both IQs and OQs.
		 */
		WRITE_ONCE(d64, octeon_read_csr64(
					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
			WRITE_ONCE(d64, octeon_read_csr64(
					oct, CN23XX_SLI_PKT_IOQ_RING_RST));
			schedule_timeout_uninterruptible(1);
		}

		/* Reset the doorbell register for this Output Queue. */
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);
		while (octeon_read_csr64(oct,
					 CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
		       loop--) {
			schedule_timeout_uninterruptible(1);
		}

		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		WRITE_ONCE(d32, octeon_read_csr(
					oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
				 READ_ONCE(d32));
	}
}

static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	u64 pkts_sent;
	u64 ret = 0;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);

	if (!droq) {
		dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			oct->pf_num, ioq_vector->ioq_num);
		return 0;
	}

	pkts_sent = readq(droq->pkts_sent_reg);

	/* If our device has interrupted, then proceed. Also check
	 * for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return ret;

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
	    (pkts_sent & CN23XX_INTR_PI_INT)) {
		if (pkts_sent & CN23XX_INTR_PO_INT)
			ret |= MSIX_PO_INT;
	}

	if (pkts_sent & CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= MSIX_PI_INT;

	/* Never need to handle msix mbox intr for pf. They arrive on the last
	 * msix
	 */
	return ret;
}

static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
{
	struct delayed_work *work;
	u64 mbox_int_val;
	u32 i, q_no;

	mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);

	for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;

		if (mbox_int_val & BIT_ULL(q_no)) {
			writeq(BIT_ULL(q_no),
			       oct->mbox[0]->mbox_int_reg);
			if (octeon_mbox_read(oct->mbox[q_no])) {
				work = &oct->mbox[q_no]->mbox_poll_wk.work;
				schedule_delayed_work(work,
						      msecs_to_jiffies(0));
			}
		}
	}
}
10045d65556bSRaghu Vatsavayi
10055b07aee1SRaghu Vatsavayi static irqreturn_t cn23xx_interrupt_handler(void *dev)
10065b07aee1SRaghu Vatsavayi {
10075b07aee1SRaghu Vatsavayi struct octeon_device *oct = (struct octeon_device *)dev;
10085b07aee1SRaghu Vatsavayi struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
10095b07aee1SRaghu Vatsavayi u64 intr64;
10105b07aee1SRaghu Vatsavayi
10115b07aee1SRaghu Vatsavayi dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
10125b07aee1SRaghu Vatsavayi intr64 = readq(cn23xx->intr_sum_reg64);
10135b07aee1SRaghu Vatsavayi
10145b07aee1SRaghu Vatsavayi oct->int_status = 0;
10155b07aee1SRaghu Vatsavayi
10165b07aee1SRaghu Vatsavayi if (intr64 & CN23XX_INTR_ERR)
10175b07aee1SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
10185b07aee1SRaghu Vatsavayi oct->octeon_id, CVM_CAST64(intr64));
10195b07aee1SRaghu Vatsavayi
10205d65556bSRaghu Vatsavayi /* When a VF writes into its MBOX_SIG2 reg, this interrupt is set in the PF */
10215d65556bSRaghu Vatsavayi if (intr64 & CN23XX_INTR_VF_MBOX)
10225d65556bSRaghu Vatsavayi cn23xx_handle_pf_mbox_intr(oct);
10235d65556bSRaghu Vatsavayi
10245b07aee1SRaghu Vatsavayi if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
10255b07aee1SRaghu Vatsavayi if (intr64 & CN23XX_INTR_PKT_DATA)
10265b07aee1SRaghu Vatsavayi oct->int_status |= OCT_DEV_INTR_PKT_DATA;
10275b07aee1SRaghu Vatsavayi }
10285b07aee1SRaghu Vatsavayi
10295b07aee1SRaghu Vatsavayi if (intr64 & (CN23XX_INTR_DMA0_FORCE))
10305b07aee1SRaghu Vatsavayi oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
10315b07aee1SRaghu Vatsavayi if (intr64 & (CN23XX_INTR_DMA1_FORCE))
10325b07aee1SRaghu Vatsavayi oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
10335b07aee1SRaghu Vatsavayi
10345b07aee1SRaghu Vatsavayi /* Clear the current interrupts */
10355b07aee1SRaghu Vatsavayi writeq(intr64, cn23xx->intr_sum_reg64);
10365b07aee1SRaghu Vatsavayi
10375b07aee1SRaghu Vatsavayi return IRQ_HANDLED;
10385b07aee1SRaghu Vatsavayi }
10395b07aee1SRaghu Vatsavayi
10409bdd4609SRaghu Vatsavayi static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
10419bdd4609SRaghu Vatsavayi u32 idx, int valid)
10429bdd4609SRaghu Vatsavayi {
10439bdd4609SRaghu Vatsavayi u64 bar1;
10449bdd4609SRaghu Vatsavayi u64 reg_adr;
10459bdd4609SRaghu Vatsavayi
10469bdd4609SRaghu Vatsavayi if (!valid) {
10479bdd4609SRaghu Vatsavayi reg_adr = lio_pci_readq(
10489bdd4609SRaghu Vatsavayi oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10499bdd4609SRaghu Vatsavayi WRITE_ONCE(bar1, reg_adr);
10509bdd4609SRaghu Vatsavayi lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
10519bdd4609SRaghu Vatsavayi CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10529bdd4609SRaghu Vatsavayi reg_adr = lio_pci_readq(
10539bdd4609SRaghu Vatsavayi oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10549bdd4609SRaghu Vatsavayi WRITE_ONCE(bar1, reg_adr);
10559bdd4609SRaghu Vatsavayi return;
10569bdd4609SRaghu Vatsavayi }
10579bdd4609SRaghu Vatsavayi
10589bdd4609SRaghu Vatsavayi /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
10599bdd4609SRaghu Vatsavayi * bits <41:22> of the Core Addr
10609bdd4609SRaghu Vatsavayi */
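/* Worked example (illustrative): for core_addr = 0x80000000,
 * (core_addr >> 22) = 0x200, and shifting left by 4 places it into
 * ADDR_IDX<23:4> as 0x2000; the driver's PCI_BAR1_MASK bits are then
 * OR'd into the low bits before the value is written below.
 */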
10619bdd4609SRaghu Vatsavayi lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
10629bdd4609SRaghu Vatsavayi CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10639bdd4609SRaghu Vatsavayi
10649bdd4609SRaghu Vatsavayi WRITE_ONCE(bar1, lio_pci_readq(
10659bdd4609SRaghu Vatsavayi oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
10669bdd4609SRaghu Vatsavayi }
10679bdd4609SRaghu Vatsavayi
10689bdd4609SRaghu Vatsavayi static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
10699bdd4609SRaghu Vatsavayi {
10709bdd4609SRaghu Vatsavayi lio_pci_writeq(oct, mask,
10719bdd4609SRaghu Vatsavayi CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10729bdd4609SRaghu Vatsavayi }
10739bdd4609SRaghu Vatsavayi
10749bdd4609SRaghu Vatsavayi static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
10759bdd4609SRaghu Vatsavayi {
10769bdd4609SRaghu Vatsavayi return (u32)lio_pci_readq(
10779bdd4609SRaghu Vatsavayi oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
10789bdd4609SRaghu Vatsavayi }
10799bdd4609SRaghu Vatsavayi
10809bdd4609SRaghu Vatsavayi /* always call with lock held */
10819bdd4609SRaghu Vatsavayi static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
10829bdd4609SRaghu Vatsavayi {
10839bdd4609SRaghu Vatsavayi u32 new_idx;
10849bdd4609SRaghu Vatsavayi u32 last_done;
10859bdd4609SRaghu Vatsavayi u32 pkt_in_done = readl(iq->inst_cnt_reg);
10869bdd4609SRaghu Vatsavayi
10879bdd4609SRaghu Vatsavayi last_done = pkt_in_done - iq->pkt_in_done;
10889bdd4609SRaghu Vatsavayi iq->pkt_in_done = pkt_in_done;
10899bdd4609SRaghu Vatsavayi
10909bdd4609SRaghu Vatsavayi /* Advance the read index by the number of instructions completed
10919bdd4609SRaghu Vatsavayi * since the last check, modulo the IQ size. The iq->reset_instr_cnt
10929bdd4609SRaghu Vatsavayi * is always zero for cn23xx, so no extra adjustments are needed.
10939bdd4609SRaghu Vatsavayi */
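/* Worked example (illustrative): with max_count = 1024,
 * octeon_read_index = 1020 and last_done = 10, the result is
 * (1020 + 10) % 1024 = 6. The unsigned 32-bit delta computed above
 * also yields the correct count across a hardware counter wrap.
 */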
10949bdd4609SRaghu Vatsavayi new_idx = (iq->octeon_read_index +
10959bdd4609SRaghu Vatsavayi (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
10969bdd4609SRaghu Vatsavayi iq->max_count;
10979bdd4609SRaghu Vatsavayi
10989bdd4609SRaghu Vatsavayi return new_idx;
10999bdd4609SRaghu Vatsavayi }
11009bdd4609SRaghu Vatsavayi
11015b07aee1SRaghu Vatsavayi static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
11025b07aee1SRaghu Vatsavayi {
11035b07aee1SRaghu Vatsavayi struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
11045b07aee1SRaghu Vatsavayi u64 intr_val = 0;
11055b07aee1SRaghu Vatsavayi
11065b07aee1SRaghu Vatsavayi /* Enable interrupts: rather than one blanket write, update the
11075b07aee1SRaghu Vatsavayi * enable register selectively based on the requested flag. */
11085b07aee1SRaghu Vatsavayi if (intr_flag == OCTEON_ALL_INTR) {
11095b07aee1SRaghu Vatsavayi writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
11105b07aee1SRaghu Vatsavayi } else if (intr_flag & OCTEON_OUTPUT_INTR) {
11115b07aee1SRaghu Vatsavayi intr_val = readq(cn23xx->intr_enb_reg64);
11125b07aee1SRaghu Vatsavayi intr_val |= CN23XX_INTR_PKT_DATA;
11135b07aee1SRaghu Vatsavayi writeq(intr_val, cn23xx->intr_enb_reg64);
11145d65556bSRaghu Vatsavayi } else if ((intr_flag & OCTEON_MBOX_INTR) &&
11155d65556bSRaghu Vatsavayi (oct->sriov_info.max_vfs > 0)) {
11165d65556bSRaghu Vatsavayi if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
11175d65556bSRaghu Vatsavayi intr_val = readq(cn23xx->intr_enb_reg64);
11185d65556bSRaghu Vatsavayi intr_val |= CN23XX_INTR_VF_MBOX;
11195d65556bSRaghu Vatsavayi writeq(intr_val, cn23xx->intr_enb_reg64);
11205d65556bSRaghu Vatsavayi }
11215b07aee1SRaghu Vatsavayi }
11225b07aee1SRaghu Vatsavayi }
11235b07aee1SRaghu Vatsavayi
11245b07aee1SRaghu Vatsavayi static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
11255b07aee1SRaghu Vatsavayi {
11265b07aee1SRaghu Vatsavayi struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
11275b07aee1SRaghu Vatsavayi u64 intr_val = 0;
11285b07aee1SRaghu Vatsavayi
11295b07aee1SRaghu Vatsavayi /* Disable Interrupts */
11305b07aee1SRaghu Vatsavayi if (intr_flag == OCTEON_ALL_INTR) {
11315b07aee1SRaghu Vatsavayi writeq(0, cn23xx->intr_enb_reg64);
11325b07aee1SRaghu Vatsavayi } else if (intr_flag & OCTEON_OUTPUT_INTR) {
11335b07aee1SRaghu Vatsavayi intr_val = readq(cn23xx->intr_enb_reg64);
11345b07aee1SRaghu Vatsavayi intr_val &= ~CN23XX_INTR_PKT_DATA;
11355b07aee1SRaghu Vatsavayi writeq(intr_val, cn23xx->intr_enb_reg64);
11365d65556bSRaghu Vatsavayi } else if ((intr_flag & OCTEON_MBOX_INTR) &&
11375d65556bSRaghu Vatsavayi (oct->sriov_info.max_vfs > 0)) {
11385d65556bSRaghu Vatsavayi if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
11395d65556bSRaghu Vatsavayi intr_val = readq(cn23xx->intr_enb_reg64);
11405d65556bSRaghu Vatsavayi intr_val &= ~CN23XX_INTR_VF_MBOX;
11415d65556bSRaghu Vatsavayi writeq(intr_val, cn23xx->intr_enb_reg64);
11425d65556bSRaghu Vatsavayi }
11435b07aee1SRaghu Vatsavayi }
11445b07aee1SRaghu Vatsavayi }
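/* A sketch of typical usage from the chip-independent code (assumed,
 * not a verbatim call site); these helpers are reached only through
 * the function list wired up in setup_cn23xx_octeon_pf_device():
 *
 *	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 *	// ... reconfigure queues ...
 *	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
 */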
11455b07aee1SRaghu Vatsavayi
114672c00912SRaghu Vatsavayi static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
114772c00912SRaghu Vatsavayi {
114872c00912SRaghu Vatsavayi oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
114972c00912SRaghu Vatsavayi
115072c00912SRaghu Vatsavayi dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
115172c00912SRaghu Vatsavayi oct->pcie_port);
115272c00912SRaghu Vatsavayi }
115372c00912SRaghu Vatsavayi
11540c45d7feSRick Farrington static int cn23xx_get_pf_num(struct octeon_device *oct)
115572c00912SRaghu Vatsavayi {
115672c00912SRaghu Vatsavayi u32 fdl_bit = 0;
11570c45d7feSRick Farrington u64 pkt0_in_ctl, d64;
11580c45d7feSRick Farrington int pfnum, mac, trs, ret;
11590c45d7feSRick Farrington
11600c45d7feSRick Farrington ret = 0;
116172c00912SRaghu Vatsavayi
116272c00912SRaghu Vatsavayi /* Read the Function Dependency Link reg to get the function number */
11630c45d7feSRick Farrington if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
11640c45d7feSRick Farrington &fdl_bit) == 0) {
116572c00912SRaghu Vatsavayi oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
116672c00912SRaghu Vatsavayi CN23XX_PCIE_SRIOV_FDL_MASK);
11670c45d7feSRick Farrington } else {
1168aa027850STianjia Zhang ret = -EINVAL;
11690c45d7feSRick Farrington
11700c45d7feSRick Farrington /* Under some virtual environments, extended PCI regs are
11710c45d7feSRick Farrington * inaccessible, in which case the above read will have failed.
11720c45d7feSRick Farrington * In this case, read the PF number from the
11730c45d7feSRick Farrington * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
11740c45d7feSRick Farrington */
11750c45d7feSRick Farrington pkt0_in_ctl = octeon_read_csr64(oct,
11760c45d7feSRick Farrington CN23XX_SLI_IQ_PKT_CONTROL64(0));
11770c45d7feSRick Farrington pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
11780c45d7feSRick Farrington CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
11790c45d7feSRick Farrington mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
11800c45d7feSRick Farrington
11810c45d7feSRick Farrington /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
11820c45d7feSRick Farrington d64 = octeon_read_csr64(oct,
11830c45d7feSRick Farrington CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
11840c45d7feSRick Farrington trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
11850c45d7feSRick Farrington if (trs == 1) {
11860c45d7feSRick Farrington dev_err(&oct->pci_dev->dev,
11870c45d7feSRick Farrington "OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
11880c45d7feSRick Farrington pfnum);
11890c45d7feSRick Farrington oct->pf_num = pfnum;
11900c45d7feSRick Farrington ret = 0;
11910c45d7feSRick Farrington } else {
11920c45d7feSRick Farrington dev_err(&oct->pci_dev->dev,
11930c45d7feSRick Farrington "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
11940c45d7feSRick Farrington }
11950c45d7feSRick Farrington }
11960c45d7feSRick Farrington
11970c45d7feSRick Farrington return ret;
119872c00912SRaghu Vatsavayi }
119972c00912SRaghu Vatsavayi
120072c00912SRaghu Vatsavayi static void cn23xx_setup_reg_address(struct octeon_device *oct)
120172c00912SRaghu Vatsavayi {
120272c00912SRaghu Vatsavayi u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
120372c00912SRaghu Vatsavayi struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
120472c00912SRaghu Vatsavayi
120572c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_addr_hi =
120672c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
120772c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_addr_lo =
120872c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
120972c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_addr =
121072c00912SRaghu Vatsavayi (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
121172c00912SRaghu Vatsavayi
121272c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_addr_hi =
121372c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
121472c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_addr_lo =
121572c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
121672c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_addr =
121772c00912SRaghu Vatsavayi (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
121872c00912SRaghu Vatsavayi
121972c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_data_hi =
122072c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
122172c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_data_lo =
122272c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
122372c00912SRaghu Vatsavayi oct->reg_list.pci_win_wr_data =
122472c00912SRaghu Vatsavayi (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
122572c00912SRaghu Vatsavayi
122672c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_data_hi =
122772c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
122872c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_data_lo =
122972c00912SRaghu Vatsavayi (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
123072c00912SRaghu Vatsavayi oct->reg_list.pci_win_rd_data =
123172c00912SRaghu Vatsavayi (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
123272c00912SRaghu Vatsavayi
123372c00912SRaghu Vatsavayi cn23xx_get_pcie_qlmport(oct);
123472c00912SRaghu Vatsavayi
123572c00912SRaghu Vatsavayi cn23xx->intr_mask64 = CN23XX_INTR_MASK;
12365b07aee1SRaghu Vatsavayi if (!oct->msix_on)
123772c00912SRaghu Vatsavayi cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
123872c00912SRaghu Vatsavayi if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
123972c00912SRaghu Vatsavayi cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
124072c00912SRaghu Vatsavayi
124172c00912SRaghu Vatsavayi cn23xx->intr_sum_reg64 =
124272c00912SRaghu Vatsavayi bar0_pciaddr +
124372c00912SRaghu Vatsavayi CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
124472c00912SRaghu Vatsavayi cn23xx->intr_enb_reg64 =
124572c00912SRaghu Vatsavayi bar0_pciaddr +
124672c00912SRaghu Vatsavayi CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
124772c00912SRaghu Vatsavayi }
124872c00912SRaghu Vatsavayi
1249c33c9973SIntiyaz Basha int cn23xx_sriov_config(struct octeon_device *oct)
125072c00912SRaghu Vatsavayi {
125172c00912SRaghu Vatsavayi struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1252d13520c7SRaghu Vatsavayi u32 max_rings, total_rings, max_vfs, rings_per_vf;
125372c00912SRaghu Vatsavayi u32 pf_srn, num_pf_rings;
1254d13520c7SRaghu Vatsavayi u32 max_possible_vfs;
125572c00912SRaghu Vatsavayi
125672c00912SRaghu Vatsavayi cn23xx->conf =
125772c00912SRaghu Vatsavayi (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
125872c00912SRaghu Vatsavayi switch (oct->rev_id) {
125972c00912SRaghu Vatsavayi case OCTEON_CN23XX_REV_1_0:
1260d13520c7SRaghu Vatsavayi max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
1261d13520c7SRaghu Vatsavayi max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
126272c00912SRaghu Vatsavayi break;
126372c00912SRaghu Vatsavayi case OCTEON_CN23XX_REV_1_1:
1264d13520c7SRaghu Vatsavayi max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
1265d13520c7SRaghu Vatsavayi max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
126672c00912SRaghu Vatsavayi break;
126772c00912SRaghu Vatsavayi default:
1268d13520c7SRaghu Vatsavayi max_rings = CN23XX_MAX_RINGS_PER_PF;
1269d13520c7SRaghu Vatsavayi max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
127072c00912SRaghu Vatsavayi break;
127172c00912SRaghu Vatsavayi }
1272d13520c7SRaghu Vatsavayi
1273c33c9973SIntiyaz Basha if (oct->sriov_info.num_pf_rings)
1274c33c9973SIntiyaz Basha num_pf_rings = oct->sriov_info.num_pf_rings;
127572c00912SRaghu Vatsavayi else
1276d13520c7SRaghu Vatsavayi num_pf_rings = num_present_cpus();
127772c00912SRaghu Vatsavayi
1278d13520c7SRaghu Vatsavayi #ifdef CONFIG_PCI_IOV
1279d13520c7SRaghu Vatsavayi max_vfs = min_t(u32,
1280d13520c7SRaghu Vatsavayi (max_rings - num_pf_rings), max_possible_vfs);
1281d13520c7SRaghu Vatsavayi rings_per_vf = 1;
1282d13520c7SRaghu Vatsavayi #else
1283d13520c7SRaghu Vatsavayi max_vfs = 0;
1284d13520c7SRaghu Vatsavayi rings_per_vf = 0;
1285d13520c7SRaghu Vatsavayi #endif
128672c00912SRaghu Vatsavayi
1287d13520c7SRaghu Vatsavayi total_rings = num_pf_rings + max_vfs;
1288d13520c7SRaghu Vatsavayi
128972c00912SRaghu Vatsavayi /* index of the first ring owned by the PF */
129072c00912SRaghu Vatsavayi pf_srn = total_rings - num_pf_rings;
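/* Worked example (illustrative): assuming a 64-ring part with 8 PF
 * rings, rings_per_vf = 1, and max_possible_vfs not the limiting
 * factor, max_vfs = 64 - 8 = 56, so total_rings = 64 and
 * pf_srn = 64 - 8 = 56: the VFs own rings 0..55 (one each) and the
 * PF owns rings 56..63.
 */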
129172c00912SRaghu Vatsavayi
129272c00912SRaghu Vatsavayi oct->sriov_info.trs = total_rings;
1293d13520c7SRaghu Vatsavayi oct->sriov_info.max_vfs = max_vfs;
1294d13520c7SRaghu Vatsavayi oct->sriov_info.rings_per_vf = rings_per_vf;
129572c00912SRaghu Vatsavayi oct->sriov_info.pf_srn = pf_srn;
129672c00912SRaghu Vatsavayi oct->sriov_info.num_pf_rings = num_pf_rings;
1297d13520c7SRaghu Vatsavayi dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
1298d13520c7SRaghu Vatsavayi oct->sriov_info.trs, oct->sriov_info.max_vfs,
1299d13520c7SRaghu Vatsavayi oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
130072c00912SRaghu Vatsavayi oct->sriov_info.num_pf_rings);
1301d13520c7SRaghu Vatsavayi
1302d13520c7SRaghu Vatsavayi oct->sriov_info.sriov_enabled = 0;
1303d13520c7SRaghu Vatsavayi
130472c00912SRaghu Vatsavayi return 0;
130572c00912SRaghu Vatsavayi }
130672c00912SRaghu Vatsavayi
130772c00912SRaghu Vatsavayi int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
130872c00912SRaghu Vatsavayi {
1309acfb98b9SRick Farrington u32 data32;
1310acfb98b9SRick Farrington u64 BAR0, BAR1;
1311acfb98b9SRick Farrington
1312acfb98b9SRick Farrington pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
1313acfb98b9SRick Farrington BAR0 = (u64)(data32 & ~0xf);
1314acfb98b9SRick Farrington pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
1315acfb98b9SRick Farrington BAR0 |= ((u64)data32 << 32);
1316acfb98b9SRick Farrington pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
1317acfb98b9SRick Farrington BAR1 = (u64)(data32 & ~0xf);
1318acfb98b9SRick Farrington pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
1319acfb98b9SRick Farrington BAR1 |= ((u64)data32 << 32);
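/* Each 64-bit BAR is reassembled from two config-space dwords: the
 * low dword with its low four flag bits masked off, plus the high
 * dword shifted up by 32. A value of zero means the BAR was never
 * assigned, which is checked below.
 */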
1320acfb98b9SRick Farrington
1321acfb98b9SRick Farrington if (!BAR0 || !BAR1) {
1322acfb98b9SRick Farrington if (!BAR0)
1323acfb98b9SRick Farrington dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
1324acfb98b9SRick Farrington if (!BAR1)
1325acfb98b9SRick Farrington dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
1326acfb98b9SRick Farrington return 1;
1327acfb98b9SRick Farrington }
1328acfb98b9SRick Farrington
132972c00912SRaghu Vatsavayi if (octeon_map_pci_barx(oct, 0, 0))
133072c00912SRaghu Vatsavayi return 1;
133172c00912SRaghu Vatsavayi
133272c00912SRaghu Vatsavayi if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
133372c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
133472c00912SRaghu Vatsavayi __func__);
133572c00912SRaghu Vatsavayi octeon_unmap_pci_barx(oct, 0);
133672c00912SRaghu Vatsavayi return 1;
133772c00912SRaghu Vatsavayi }
133872c00912SRaghu Vatsavayi
13390c45d7feSRick Farrington if (cn23xx_get_pf_num(oct) != 0)
13400c45d7feSRick Farrington return 1;
134172c00912SRaghu Vatsavayi
134272c00912SRaghu Vatsavayi if (cn23xx_sriov_config(oct)) {
134372c00912SRaghu Vatsavayi octeon_unmap_pci_barx(oct, 0);
134472c00912SRaghu Vatsavayi octeon_unmap_pci_barx(oct, 1);
134572c00912SRaghu Vatsavayi return 1;
134672c00912SRaghu Vatsavayi }
134772c00912SRaghu Vatsavayi
134872c00912SRaghu Vatsavayi octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
134972c00912SRaghu Vatsavayi
135072c00912SRaghu Vatsavayi oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
135172c00912SRaghu Vatsavayi oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
13525d65556bSRaghu Vatsavayi oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
13535d65556bSRaghu Vatsavayi oct->fn_list.free_mbox = cn23xx_free_pf_mbox;
13545d65556bSRaghu Vatsavayi
13555b07aee1SRaghu Vatsavayi oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
13565b07aee1SRaghu Vatsavayi oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
13575b07aee1SRaghu Vatsavayi
1358c0eab5b3SRaghu Vatsavayi oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
13593451b97cSRaghu Vatsavayi oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
13609bdd4609SRaghu Vatsavayi oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
13619bdd4609SRaghu Vatsavayi
13629bdd4609SRaghu Vatsavayi oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
13639bdd4609SRaghu Vatsavayi oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
13649bdd4609SRaghu Vatsavayi oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
136572c00912SRaghu Vatsavayi
13665b07aee1SRaghu Vatsavayi oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
13675b07aee1SRaghu Vatsavayi oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
13685b07aee1SRaghu Vatsavayi
13691b7c55c4SRaghu Vatsavayi oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
13701b7c55c4SRaghu Vatsavayi oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
13711b7c55c4SRaghu Vatsavayi
137272c00912SRaghu Vatsavayi cn23xx_setup_reg_address(oct);
137372c00912SRaghu Vatsavayi
137472c00912SRaghu Vatsavayi oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
137572c00912SRaghu Vatsavayi
137672c00912SRaghu Vatsavayi return 0;
137772c00912SRaghu Vatsavayi }
1378*f71be9d0SMasahiro Yamada EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
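/* A minimal sketch of how the PF probe path is expected to call this
 * setup routine (assumed; the error label is hypothetical):
 *
 *	if (setup_cn23xx_octeon_pf_device(oct)) {
 *		dev_err(&oct->pci_dev->dev, "CN23XX PF setup failed\n");
 *		goto setup_fail;	// hypothetical label
 *	}
 */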
137972c00912SRaghu Vatsavayi
138072c00912SRaghu Vatsavayi int validate_cn23xx_pf_config_info(struct octeon_device *oct,
138172c00912SRaghu Vatsavayi struct octeon_config *conf23xx)
138272c00912SRaghu Vatsavayi {
138372c00912SRaghu Vatsavayi if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
138472c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
138572c00912SRaghu Vatsavayi __func__, CFG_GET_IQ_MAX_Q(conf23xx),
138672c00912SRaghu Vatsavayi CN23XX_MAX_INPUT_QUEUES);
138772c00912SRaghu Vatsavayi return 1;
138872c00912SRaghu Vatsavayi }
138972c00912SRaghu Vatsavayi
139072c00912SRaghu Vatsavayi if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
139172c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
139272c00912SRaghu Vatsavayi __func__, CFG_GET_OQ_MAX_Q(conf23xx),
139372c00912SRaghu Vatsavayi CN23XX_MAX_OUTPUT_QUEUES);
139472c00912SRaghu Vatsavayi return 1;
139572c00912SRaghu Vatsavayi }
139672c00912SRaghu Vatsavayi
139772c00912SRaghu Vatsavayi if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
139872c00912SRaghu Vatsavayi CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
139972c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
140072c00912SRaghu Vatsavayi __func__);
140172c00912SRaghu Vatsavayi return 1;
140272c00912SRaghu Vatsavayi }
140372c00912SRaghu Vatsavayi
1404c4ee5d81SPrasad Kanneganti if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
140572c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s: Invalid OQ refill threshold\n",
140672c00912SRaghu Vatsavayi __func__);
140772c00912SRaghu Vatsavayi return 1;
140872c00912SRaghu Vatsavayi }
140972c00912SRaghu Vatsavayi
141072c00912SRaghu Vatsavayi if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
141172c00912SRaghu Vatsavayi dev_err(&oct->pci_dev->dev, "%s: Invalid OQ interrupt time\n",
141272c00912SRaghu Vatsavayi __func__);
141372c00912SRaghu Vatsavayi return 1;
141472c00912SRaghu Vatsavayi }
141572c00912SRaghu Vatsavayi
141672c00912SRaghu Vatsavayi return 0;
141772c00912SRaghu Vatsavayi }
141872c00912SRaghu Vatsavayi
1419c0eab5b3SRaghu Vatsavayi int cn23xx_fw_loaded(struct octeon_device *oct)
1420c0eab5b3SRaghu Vatsavayi {
1421c0eab5b3SRaghu Vatsavayi u64 val;
1422c0eab5b3SRaghu Vatsavayi
1423b2854772SFelix Manlunas /* If there's more than one active PF on this NIC, then that
1424b2854772SFelix Manlunas * implies that the NIC firmware is loaded and running. This check
1425b2854772SFelix Manlunas * prevents a rare false negative that might occur if we only relied
1426b2854772SFelix Manlunas * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
1427b2854772SFelix Manlunas * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
1428b2854772SFelix Manlunas * though the firmware was already loaded but still booting and has yet
1429b2854772SFelix Manlunas * to set SCR2_BIT_FW_LOADED.
1430b2854772SFelix Manlunas */
1431b2854772SFelix Manlunas if (atomic_read(oct->adapter_refcount) > 1)
1432b2854772SFelix Manlunas return 1;
1433b2854772SFelix Manlunas
1434b2854772SFelix Manlunas val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
1435b2854772SFelix Manlunas return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
1436c0eab5b3SRaghu Vatsavayi }
1437*f71be9d0SMasahiro Yamada EXPORT_SYMBOL_GPL(cn23xx_fw_loaded);
143886dea55bSRaghu Vatsavayi
143986dea55bSRaghu Vatsavayi void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
144086dea55bSRaghu Vatsavayi u8 *mac)
144186dea55bSRaghu Vatsavayi {
144286dea55bSRaghu Vatsavayi if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
144386dea55bSRaghu Vatsavayi struct octeon_mbox_cmd mbox_cmd;
144486dea55bSRaghu Vatsavayi
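/* Build a one-way notification (resp_needed = 0): the new MAC is
 * carried in the message parameters, and q_no addresses the VF's
 * first ring, which selects that VF's mailbox.
 */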
144586dea55bSRaghu Vatsavayi mbox_cmd.msg.u64 = 0;
144686dea55bSRaghu Vatsavayi mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
144786dea55bSRaghu Vatsavayi mbox_cmd.msg.s.resp_needed = 0;
144886dea55bSRaghu Vatsavayi mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
144986dea55bSRaghu Vatsavayi mbox_cmd.msg.s.len = 1;
145086dea55bSRaghu Vatsavayi mbox_cmd.recv_len = 0;
145186dea55bSRaghu Vatsavayi mbox_cmd.recv_status = 0;
145286dea55bSRaghu Vatsavayi mbox_cmd.fn = NULL;
1453bf9d787bSYueHaibing mbox_cmd.fn_arg = NULL;
145486dea55bSRaghu Vatsavayi ether_addr_copy(mbox_cmd.msg.s.params, mac);
145586dea55bSRaghu Vatsavayi mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
145686dea55bSRaghu Vatsavayi octeon_mbox_write(oct, &mbox_cmd);
145786dea55bSRaghu Vatsavayi }
145886dea55bSRaghu Vatsavayi }
1459*f71be9d0SMasahiro Yamada EXPORT_SYMBOL_GPL(cn23xx_tell_vf_its_macaddr_changed);
1460cea395acSIntiyaz Basha
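/* Mailbox completion callback for OCTEON_GET_VF_STATS: copy the stats
 * carried in the reply into the caller's context and mark it done so
 * the waiter below can stop polling.
 */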
1461cea395acSIntiyaz Basha static void
1462cea395acSIntiyaz Basha cn23xx_get_vf_stats_callback(struct octeon_device *oct,
1463cea395acSIntiyaz Basha struct octeon_mbox_cmd *cmd, void *arg)
1464cea395acSIntiyaz Basha {
1465cea395acSIntiyaz Basha struct oct_vf_stats_ctx *ctx = arg;
1466cea395acSIntiyaz Basha
1467cea395acSIntiyaz Basha memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
1468cea395acSIntiyaz Basha atomic_set(&ctx->status, 1);
1469cea395acSIntiyaz Basha }
1470cea395acSIntiyaz Basha
1471cea395acSIntiyaz Basha int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
1472cea395acSIntiyaz Basha struct oct_vf_stats *stats)
1473cea395acSIntiyaz Basha {
1474cea395acSIntiyaz Basha u32 timeout = HZ; /* 1 sec */
1475cea395acSIntiyaz Basha struct octeon_mbox_cmd mbox_cmd;
1476cea395acSIntiyaz Basha struct oct_vf_stats_ctx ctx;
1477cea395acSIntiyaz Basha u32 count = 0, ret;
1478cea395acSIntiyaz Basha
1479cea395acSIntiyaz Basha if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
1480cea395acSIntiyaz Basha return -1;
1481cea395acSIntiyaz Basha
1482cea395acSIntiyaz Basha if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
1483cea395acSIntiyaz Basha return -1;
1484cea395acSIntiyaz Basha
1485cea395acSIntiyaz Basha mbox_cmd.msg.u64 = 0;
1486cea395acSIntiyaz Basha mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
1487cea395acSIntiyaz Basha mbox_cmd.msg.s.resp_needed = 1;
1488cea395acSIntiyaz Basha mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
1489cea395acSIntiyaz Basha mbox_cmd.msg.s.len = 1;
1490cea395acSIntiyaz Basha mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
1491cea395acSIntiyaz Basha mbox_cmd.recv_len = 0;
1492cea395acSIntiyaz Basha mbox_cmd.recv_status = 0;
1493cea395acSIntiyaz Basha mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
1494cea395acSIntiyaz Basha ctx.stats = stats;
1495cea395acSIntiyaz Basha atomic_set(&ctx.status, 0);
1496cea395acSIntiyaz Basha mbox_cmd.fn_arg = (void *)&ctx;
1497cea395acSIntiyaz Basha memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
1498cea395acSIntiyaz Basha octeon_mbox_write(oct, &mbox_cmd);
1499cea395acSIntiyaz Basha
1500cea395acSIntiyaz Basha do {
1501cea395acSIntiyaz Basha schedule_timeout_uninterruptible(1);
1502cea395acSIntiyaz Basha } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));
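/* Each loop iteration sleeps for one jiffy, so "count" reaching
 * "timeout" (HZ jiffies) corresponds to roughly one second of waiting
 * before the request is cancelled below.
 */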
1503cea395acSIntiyaz Basha
1504cea395acSIntiyaz Basha ret = atomic_read(&ctx.status);
1505cea395acSIntiyaz Basha if (ret == 0) {
1506cea395acSIntiyaz Basha octeon_mbox_cancel(oct, 0);
1507cea395acSIntiyaz Basha dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timed out\n",
1508cea395acSIntiyaz Basha vfidx);
1509cea395acSIntiyaz Basha return -1;
1510cea395acSIntiyaz Basha }
1511cea395acSIntiyaz Basha
1512cea395acSIntiyaz Basha return 0;
1513cea395acSIntiyaz Basha }
1514*f71be9d0SMasahiro Yamada EXPORT_SYMBOL_GPL(cn23xx_get_vf_stats);
1515