1*26ad340eSHenning Colliander // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2*26ad340eSHenning Colliander /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 3*26ad340eSHenning Colliander * Parts of this driver are based on the following: 4*26ad340eSHenning Colliander * - Kvaser linux pciefd driver (version 5.25) 5*26ad340eSHenning Colliander * - PEAK linux canfd driver 6*26ad340eSHenning Colliander * - Altera Avalon EPCS flash controller driver 7*26ad340eSHenning Colliander */ 8*26ad340eSHenning Colliander 9*26ad340eSHenning Colliander #include <linux/kernel.h> 10*26ad340eSHenning Colliander #include <linux/version.h> 11*26ad340eSHenning Colliander #include <linux/module.h> 12*26ad340eSHenning Colliander #include <linux/device.h> 13*26ad340eSHenning Colliander #include <linux/pci.h> 14*26ad340eSHenning Colliander #include <linux/can/dev.h> 15*26ad340eSHenning Colliander #include <linux/timer.h> 16*26ad340eSHenning Colliander #include <linux/netdevice.h> 17*26ad340eSHenning Colliander #include <linux/crc32.h> 18*26ad340eSHenning Colliander #include <linux/iopoll.h> 19*26ad340eSHenning Colliander 20*26ad340eSHenning Colliander MODULE_LICENSE("Dual BSD/GPL"); 21*26ad340eSHenning Colliander MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); 22*26ad340eSHenning Colliander MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); 23*26ad340eSHenning Colliander 24*26ad340eSHenning Colliander #define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" 25*26ad340eSHenning Colliander 26*26ad340eSHenning Colliander #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) 27*26ad340eSHenning Colliander #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) 28*26ad340eSHenning Colliander #define KVASER_PCIEFD_MAX_ERR_REP 256 29*26ad340eSHenning Colliander #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 30*26ad340eSHenning Colliander #define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 31*26ad340eSHenning Colliander #define KVASER_PCIEFD_DMA_COUNT 2 32*26ad340eSHenning 
Colliander 33*26ad340eSHenning Colliander #define KVASER_PCIEFD_DMA_SIZE (4 * 1024) 34*26ad340eSHenning Colliander #define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) 35*26ad340eSHenning Colliander 36*26ad340eSHenning Colliander #define KVASER_PCIEFD_VENDOR 0x1a07 37*26ad340eSHenning Colliander #define KVASER_PCIEFD_4HS_ID 0x0d 38*26ad340eSHenning Colliander #define KVASER_PCIEFD_2HS_ID 0x0e 39*26ad340eSHenning Colliander #define KVASER_PCIEFD_HS_ID 0x0f 40*26ad340eSHenning Colliander #define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 41*26ad340eSHenning Colliander #define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 42*26ad340eSHenning Colliander 43*26ad340eSHenning Colliander /* PCIe IRQ registers */ 44*26ad340eSHenning Colliander #define KVASER_PCIEFD_IRQ_REG 0x40 45*26ad340eSHenning Colliander #define KVASER_PCIEFD_IEN_REG 0x50 46*26ad340eSHenning Colliander /* DMA map */ 47*26ad340eSHenning Colliander #define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 48*26ad340eSHenning Colliander /* Kvaser KCAN CAN controller registers */ 49*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN0_BASE 0x10000 50*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 51*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 52*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 53*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 54*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CMD_REG 0x400 55*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IEN_REG 0x408 56*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 57*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414 58*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 59*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c 60*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 61*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 62*26ad340eSHenning Colliander 
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 63*26ad340eSHenning Colliander /* Loopback control register */ 64*26ad340eSHenning Colliander #define KVASER_PCIEFD_LOOP_REG 0x1f000 65*26ad340eSHenning Colliander /* System identification and information registers */ 66*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_BASE 0x1f020 67*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) 68*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) 69*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) 70*26ad340eSHenning Colliander /* Shared receive buffer registers */ 71*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_BASE 0x1f200 72*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) 73*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) 74*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) 75*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) 76*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) 77*26ad340eSHenning Colliander /* EPCS flash controller registers */ 78*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_BASE 0x1fc00 79*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE 80*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) 81*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) 82*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) 83*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) 84*26ad340eSHenning Colliander 85*26ad340eSHenning Colliander #define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f 
86*26ad340eSHenning Colliander #define KVASER_PCIEFD_IRQ_SRB BIT(4) 87*26ad340eSHenning Colliander 88*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 89*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 90*26ad340eSHenning Colliander #define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1 91*26ad340eSHenning Colliander 92*26ad340eSHenning Colliander /* Reset DMA buffer 0, 1 and FIFO offset */ 93*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) 94*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) 95*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) 96*26ad340eSHenning Colliander 97*26ad340eSHenning Colliander /* DMA packet done, buffer 0 and 1 */ 98*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) 99*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) 100*26ad340eSHenning Colliander /* DMA overflow, buffer 0 and 1 */ 101*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) 102*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) 103*26ad340eSHenning Colliander /* DMA underflow, buffer 0 and 1 */ 104*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) 105*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) 106*26ad340eSHenning Colliander 107*26ad340eSHenning Colliander /* DMA idle */ 108*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_STAT_DI BIT(15) 109*26ad340eSHenning Colliander /* DMA support */ 110*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) 111*26ad340eSHenning Colliander 112*26ad340eSHenning Colliander /* DMA Enable */ 113*26ad340eSHenning Colliander #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) 114*26ad340eSHenning Colliander 115*26ad340eSHenning Colliander /* EPCS flash controller definitions */ 116*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) 117*26ad340eSHenning Colliander #define 
KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) 118*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_MAX_PARAMS 256 119*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d 120*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 121*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_SYS_VER 1 122*26ad340eSHenning Colliander #define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 123*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_TMT BIT(5) 124*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_TRDY BIT(6) 125*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPI_RRDY BIT(7) 126*26ad340eSHenning Colliander #define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14 127*26ad340eSHenning Colliander /* Commands for controlling the onboard flash */ 128*26ad340eSHenning Colliander #define KVASER_PCIEFD_FLASH_RES_CMD 0xab 129*26ad340eSHenning Colliander #define KVASER_PCIEFD_FLASH_READ_CMD 0x3 130*26ad340eSHenning Colliander #define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5 131*26ad340eSHenning Colliander 132*26ad340eSHenning Colliander /* Kvaser KCAN definitions */ 133*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) 134*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) 135*26ad340eSHenning Colliander 136*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16 137*26ad340eSHenning Colliander /* Request status packet */ 138*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) 139*26ad340eSHenning Colliander /* Abort, flush and reset */ 140*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) 141*26ad340eSHenning Colliander 142*26ad340eSHenning Colliander /* Tx FIFO unaligned read */ 143*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) 144*26ad340eSHenning Colliander /* Tx FIFO unaligned end */ 145*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) 146*26ad340eSHenning Colliander /* Bus parameter protection error */ 
147*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) 148*26ad340eSHenning Colliander /* FDF bit when controller is in classic mode */ 149*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) 150*26ad340eSHenning Colliander /* Rx FIFO overflow */ 151*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) 152*26ad340eSHenning Colliander /* Abort done */ 153*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) 154*26ad340eSHenning Colliander /* Tx buffer flush done */ 155*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) 156*26ad340eSHenning Colliander /* Tx FIFO overflow */ 157*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) 158*26ad340eSHenning Colliander /* Tx FIFO empty */ 159*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) 160*26ad340eSHenning Colliander /* Transmitter unaligned */ 161*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) 162*26ad340eSHenning Colliander 163*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 164*26ad340eSHenning Colliander 165*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 166*26ad340eSHenning Colliander /* Abort request */ 167*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) 168*26ad340eSHenning Colliander /* Idle state. 
Controller in reset mode and no abort or flush pending */ 169*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) 170*26ad340eSHenning Colliander /* Bus off */ 171*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) 172*26ad340eSHenning Colliander /* Reset mode request */ 173*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) 174*26ad340eSHenning Colliander /* Controller in reset mode */ 175*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) 176*26ad340eSHenning Colliander /* Controller got one-shot capability */ 177*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) 178*26ad340eSHenning Colliander /* Controller got CAN FD capability */ 179*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) 180*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ 181*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ 182*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_STAT_IRM) 183*26ad340eSHenning Colliander 184*26ad340eSHenning Colliander /* Reset mode */ 185*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) 186*26ad340eSHenning Colliander /* Listen only mode */ 187*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) 188*26ad340eSHenning Colliander /* Error packet enable */ 189*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) 190*26ad340eSHenning Colliander /* CAN FD non-ISO */ 191*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) 192*26ad340eSHenning Colliander /* Acknowledgment packet type */ 193*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) 194*26ad340eSHenning Colliander /* Active error flag enable. 
Clear to force error passive */ 195*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) 196*26ad340eSHenning Colliander /* Classic CAN mode */ 197*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) 198*26ad340eSHenning Colliander 199*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 200*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 201*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 202*26ad340eSHenning Colliander 203*26ad340eSHenning Colliander #define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 204*26ad340eSHenning Colliander 205*26ad340eSHenning Colliander /* Kvaser KCAN packet types */ 206*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_DATA 0 207*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_ACK 1 208*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 209*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_ERROR 3 210*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 211*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 212*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 213*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_STATUS 8 214*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 215*26ad340eSHenning Colliander 216*26ad340eSHenning Colliander /* Kvaser KCAN packet common definitions */ 217*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff 218*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 219*26ad340eSHenning Colliander #define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28 220*26ad340eSHenning Colliander 221*26ad340eSHenning Colliander /* Kvaser KCAN TDATA and RDATA first word */ 222*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_IDE BIT(30) 223*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_RTR BIT(29) 224*26ad340eSHenning Colliander /* 
Kvaser KCAN TDATA and RDATA second word */ 225*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_ESI BIT(13) 226*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_BRS BIT(14) 227*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_FDF BIT(15) 228*26ad340eSHenning Colliander #define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8 229*26ad340eSHenning Colliander /* Kvaser KCAN TDATA second word */ 230*26ad340eSHenning Colliander #define KVASER_PCIEFD_TPACKET_SMS BIT(16) 231*26ad340eSHenning Colliander #define KVASER_PCIEFD_TPACKET_AREQ BIT(31) 232*26ad340eSHenning Colliander 233*26ad340eSHenning Colliander /* Kvaser KCAN APACKET */ 234*26ad340eSHenning Colliander #define KVASER_PCIEFD_APACKET_FLU BIT(8) 235*26ad340eSHenning Colliander #define KVASER_PCIEFD_APACKET_CT BIT(9) 236*26ad340eSHenning Colliander #define KVASER_PCIEFD_APACKET_ABL BIT(10) 237*26ad340eSHenning Colliander #define KVASER_PCIEFD_APACKET_NACK BIT(11) 238*26ad340eSHenning Colliander 239*26ad340eSHenning Colliander /* Kvaser KCAN SPACK first word */ 240*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8 241*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_BOFF BIT(16) 242*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_IDET BIT(20) 243*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_IRM BIT(21) 244*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_RMCD BIT(22) 245*26ad340eSHenning Colliander /* Kvaser KCAN SPACK second word */ 246*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_AUTO BIT(21) 247*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_EWLR BIT(23) 248*26ad340eSHenning Colliander #define KVASER_PCIEFD_SPACK_EPLR BIT(24) 249*26ad340eSHenning Colliander 250*26ad340eSHenning Colliander struct kvaser_pciefd; 251*26ad340eSHenning Colliander 252*26ad340eSHenning Colliander struct kvaser_pciefd_can { 253*26ad340eSHenning Colliander struct can_priv can; 254*26ad340eSHenning Colliander struct kvaser_pciefd *kv_pcie; 
255*26ad340eSHenning Colliander void __iomem *reg_base; 256*26ad340eSHenning Colliander struct can_berr_counter bec; 257*26ad340eSHenning Colliander u8 cmd_seq; 258*26ad340eSHenning Colliander int err_rep_cnt; 259*26ad340eSHenning Colliander int echo_idx; 260*26ad340eSHenning Colliander spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ 261*26ad340eSHenning Colliander spinlock_t echo_lock; /* Locks the message echo buffer */ 262*26ad340eSHenning Colliander struct timer_list bec_poll_timer; 263*26ad340eSHenning Colliander struct completion start_comp, flush_comp; 264*26ad340eSHenning Colliander }; 265*26ad340eSHenning Colliander 266*26ad340eSHenning Colliander struct kvaser_pciefd { 267*26ad340eSHenning Colliander struct pci_dev *pci; 268*26ad340eSHenning Colliander void __iomem *reg_base; 269*26ad340eSHenning Colliander struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 270*26ad340eSHenning Colliander void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 271*26ad340eSHenning Colliander u8 nr_channels; 272*26ad340eSHenning Colliander u32 freq; 273*26ad340eSHenning Colliander u32 freq_to_ticks_div; 274*26ad340eSHenning Colliander }; 275*26ad340eSHenning Colliander 276*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet { 277*26ad340eSHenning Colliander u32 header[2]; 278*26ad340eSHenning Colliander u64 timestamp; 279*26ad340eSHenning Colliander }; 280*26ad340eSHenning Colliander 281*26ad340eSHenning Colliander struct kvaser_pciefd_tx_packet { 282*26ad340eSHenning Colliander u32 header[2]; 283*26ad340eSHenning Colliander u8 data[64]; 284*26ad340eSHenning Colliander }; 285*26ad340eSHenning Colliander 286*26ad340eSHenning Colliander static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { 287*26ad340eSHenning Colliander .name = KVASER_PCIEFD_DRV_NAME, 288*26ad340eSHenning Colliander .tseg1_min = 1, 289*26ad340eSHenning Colliander .tseg1_max = 255, 290*26ad340eSHenning Colliander .tseg2_min = 1, 291*26ad340eSHenning Colliander .tseg2_max 
= 32, 292*26ad340eSHenning Colliander .sjw_max = 16, 293*26ad340eSHenning Colliander .brp_min = 1, 294*26ad340eSHenning Colliander .brp_max = 4096, 295*26ad340eSHenning Colliander .brp_inc = 1, 296*26ad340eSHenning Colliander }; 297*26ad340eSHenning Colliander 298*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_param { 299*26ad340eSHenning Colliander __le32 magic; 300*26ad340eSHenning Colliander __le32 nr; 301*26ad340eSHenning Colliander __le32 len; 302*26ad340eSHenning Colliander u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; 303*26ad340eSHenning Colliander }; 304*26ad340eSHenning Colliander 305*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_img { 306*26ad340eSHenning Colliander __le32 version; 307*26ad340eSHenning Colliander __le32 magic; 308*26ad340eSHenning Colliander __le32 crc; 309*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; 310*26ad340eSHenning Colliander }; 311*26ad340eSHenning Colliander 312*26ad340eSHenning Colliander static struct pci_device_id kvaser_pciefd_id_table[] = { 313*26ad340eSHenning Colliander { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, 314*26ad340eSHenning Colliander { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, 315*26ad340eSHenning Colliander { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, 316*26ad340eSHenning Colliander { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, 317*26ad340eSHenning Colliander { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, 318*26ad340eSHenning Colliander { 0,}, 319*26ad340eSHenning Colliander }; 320*26ad340eSHenning Colliander MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); 321*26ad340eSHenning Colliander 322*26ad340eSHenning Colliander /* Onboard flash memory functions */ 323*26ad340eSHenning Colliander static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) 324*26ad340eSHenning Colliander { 325*26ad340eSHenning Colliander u32 res; 326*26ad340eSHenning 
Colliander int ret; 327*26ad340eSHenning Colliander 328*26ad340eSHenning Colliander ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG, 329*26ad340eSHenning Colliander res, res & msk, 0, 10); 330*26ad340eSHenning Colliander 331*26ad340eSHenning Colliander return ret; 332*26ad340eSHenning Colliander } 333*26ad340eSHenning Colliander 334*26ad340eSHenning Colliander static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, 335*26ad340eSHenning Colliander u32 tx_len, u8 *rx, u32 rx_len) 336*26ad340eSHenning Colliander { 337*26ad340eSHenning Colliander int c; 338*26ad340eSHenning Colliander 339*26ad340eSHenning Colliander iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG); 340*26ad340eSHenning Colliander iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 341*26ad340eSHenning Colliander ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 342*26ad340eSHenning Colliander 343*26ad340eSHenning Colliander c = tx_len; 344*26ad340eSHenning Colliander while (c--) { 345*26ad340eSHenning Colliander if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 346*26ad340eSHenning Colliander return -EIO; 347*26ad340eSHenning Colliander 348*26ad340eSHenning Colliander iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); 349*26ad340eSHenning Colliander 350*26ad340eSHenning Colliander if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 351*26ad340eSHenning Colliander return -EIO; 352*26ad340eSHenning Colliander 353*26ad340eSHenning Colliander ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 354*26ad340eSHenning Colliander } 355*26ad340eSHenning Colliander 356*26ad340eSHenning Colliander c = rx_len; 357*26ad340eSHenning Colliander while (c-- > 0) { 358*26ad340eSHenning Colliander if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 359*26ad340eSHenning Colliander return -EIO; 360*26ad340eSHenning Colliander 361*26ad340eSHenning Colliander iowrite32(0, pcie->reg_base + 
KVASER_PCIEFD_SPI_TX_REG); 362*26ad340eSHenning Colliander 363*26ad340eSHenning Colliander if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 364*26ad340eSHenning Colliander return -EIO; 365*26ad340eSHenning Colliander 366*26ad340eSHenning Colliander *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 367*26ad340eSHenning Colliander } 368*26ad340eSHenning Colliander 369*26ad340eSHenning Colliander if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) 370*26ad340eSHenning Colliander return -EIO; 371*26ad340eSHenning Colliander 372*26ad340eSHenning Colliander iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 373*26ad340eSHenning Colliander 374*26ad340eSHenning Colliander if (c != -1) { 375*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); 376*26ad340eSHenning Colliander return -EIO; 377*26ad340eSHenning Colliander } 378*26ad340eSHenning Colliander 379*26ad340eSHenning Colliander return 0; 380*26ad340eSHenning Colliander } 381*26ad340eSHenning Colliander 382*26ad340eSHenning Colliander static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, 383*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_img *img) 384*26ad340eSHenning Colliander { 385*26ad340eSHenning Colliander int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; 386*26ad340eSHenning Colliander int res, crc; 387*26ad340eSHenning Colliander u8 *crc_buff; 388*26ad340eSHenning Colliander 389*26ad340eSHenning Colliander u8 cmd[] = { 390*26ad340eSHenning Colliander KVASER_PCIEFD_FLASH_READ_CMD, 391*26ad340eSHenning Colliander (u8)((offset >> 16) & 0xff), 392*26ad340eSHenning Colliander (u8)((offset >> 8) & 0xff), 393*26ad340eSHenning Colliander (u8)(offset & 0xff) 394*26ad340eSHenning Colliander }; 395*26ad340eSHenning Colliander 396*26ad340eSHenning Colliander res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img, 397*26ad340eSHenning Colliander KVASER_PCIEFD_CFG_IMG_SZ); 398*26ad340eSHenning Colliander if (res) 
399*26ad340eSHenning Colliander return res; 400*26ad340eSHenning Colliander 401*26ad340eSHenning Colliander crc_buff = (u8 *)img->params; 402*26ad340eSHenning Colliander 403*26ad340eSHenning Colliander if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) { 404*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 405*26ad340eSHenning Colliander "Config flash corrupted, version number is wrong\n"); 406*26ad340eSHenning Colliander return -ENODEV; 407*26ad340eSHenning Colliander } 408*26ad340eSHenning Colliander 409*26ad340eSHenning Colliander if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { 410*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 411*26ad340eSHenning Colliander "Config flash corrupted, magic number is wrong\n"); 412*26ad340eSHenning Colliander return -ENODEV; 413*26ad340eSHenning Colliander } 414*26ad340eSHenning Colliander 415*26ad340eSHenning Colliander crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); 416*26ad340eSHenning Colliander if (le32_to_cpu(img->crc) != crc) { 417*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 418*26ad340eSHenning Colliander "Stored CRC does not match flash image contents\n"); 419*26ad340eSHenning Colliander return -EIO; 420*26ad340eSHenning Colliander } 421*26ad340eSHenning Colliander 422*26ad340eSHenning Colliander return 0; 423*26ad340eSHenning Colliander } 424*26ad340eSHenning Colliander 425*26ad340eSHenning Colliander static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, 426*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_img *img) 427*26ad340eSHenning Colliander { 428*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_param *param; 429*26ad340eSHenning Colliander 430*26ad340eSHenning Colliander param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; 431*26ad340eSHenning Colliander memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); 432*26ad340eSHenning Colliander } 433*26ad340eSHenning Colliander 434*26ad340eSHenning Colliander static int 
kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) 435*26ad340eSHenning Colliander { 436*26ad340eSHenning Colliander int res; 437*26ad340eSHenning Colliander struct kvaser_pciefd_cfg_img *img; 438*26ad340eSHenning Colliander 439*26ad340eSHenning Colliander /* Read electronic signature */ 440*26ad340eSHenning Colliander u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; 441*26ad340eSHenning Colliander 442*26ad340eSHenning Colliander res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1); 443*26ad340eSHenning Colliander if (res) 444*26ad340eSHenning Colliander return -EIO; 445*26ad340eSHenning Colliander 446*26ad340eSHenning Colliander img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); 447*26ad340eSHenning Colliander if (!img) 448*26ad340eSHenning Colliander return -ENOMEM; 449*26ad340eSHenning Colliander 450*26ad340eSHenning Colliander if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) { 451*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 452*26ad340eSHenning Colliander "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n", 453*26ad340eSHenning Colliander cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16); 454*26ad340eSHenning Colliander 455*26ad340eSHenning Colliander res = -ENODEV; 456*26ad340eSHenning Colliander goto image_free; 457*26ad340eSHenning Colliander } 458*26ad340eSHenning Colliander 459*26ad340eSHenning Colliander cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; 460*26ad340eSHenning Colliander res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1); 461*26ad340eSHenning Colliander if (res) { 462*26ad340eSHenning Colliander goto image_free; 463*26ad340eSHenning Colliander } else if (cmd[0] & 1) { 464*26ad340eSHenning Colliander res = -EIO; 465*26ad340eSHenning Colliander /* No write is ever done, the WIP should never be set */ 466*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); 467*26ad340eSHenning Colliander goto image_free; 468*26ad340eSHenning Colliander } 469*26ad340eSHenning Colliander 470*26ad340eSHenning Colliander res 
= kvaser_pciefd_cfg_read_and_verify(pcie, img); 471*26ad340eSHenning Colliander if (res) { 472*26ad340eSHenning Colliander res = -EIO; 473*26ad340eSHenning Colliander goto image_free; 474*26ad340eSHenning Colliander } 475*26ad340eSHenning Colliander 476*26ad340eSHenning Colliander kvaser_pciefd_cfg_read_params(pcie, img); 477*26ad340eSHenning Colliander 478*26ad340eSHenning Colliander image_free: 479*26ad340eSHenning Colliander kfree(img); 480*26ad340eSHenning Colliander return res; 481*26ad340eSHenning Colliander } 482*26ad340eSHenning Colliander 483*26ad340eSHenning Colliander static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) 484*26ad340eSHenning Colliander { 485*26ad340eSHenning Colliander u32 cmd; 486*26ad340eSHenning Colliander 487*26ad340eSHenning Colliander cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; 488*26ad340eSHenning Colliander cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 489*26ad340eSHenning Colliander iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 490*26ad340eSHenning Colliander } 491*26ad340eSHenning Colliander 492*26ad340eSHenning Colliander static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) 493*26ad340eSHenning Colliander { 494*26ad340eSHenning Colliander u32 mode; 495*26ad340eSHenning Colliander unsigned long irq; 496*26ad340eSHenning Colliander 497*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 498*26ad340eSHenning Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 499*26ad340eSHenning Colliander if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { 500*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 501*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 502*26ad340eSHenning Colliander } 503*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 504*26ad340eSHenning Colliander } 505*26ad340eSHenning Colliander 506*26ad340eSHenning Colliander static void 
kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) 507*26ad340eSHenning Colliander { 508*26ad340eSHenning Colliander u32 mode; 509*26ad340eSHenning Colliander unsigned long irq; 510*26ad340eSHenning Colliander 511*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 512*26ad340eSHenning Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 513*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; 514*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 515*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 516*26ad340eSHenning Colliander } 517*26ad340eSHenning Colliander 518*26ad340eSHenning Colliander static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) 519*26ad340eSHenning Colliander { 520*26ad340eSHenning Colliander u32 msk; 521*26ad340eSHenning Colliander 522*26ad340eSHenning Colliander msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | 523*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | 524*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | 525*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | 526*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; 527*26ad340eSHenning Colliander 528*26ad340eSHenning Colliander iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 529*26ad340eSHenning Colliander 530*26ad340eSHenning Colliander return 0; 531*26ad340eSHenning Colliander } 532*26ad340eSHenning Colliander 533*26ad340eSHenning Colliander static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) 534*26ad340eSHenning Colliander { 535*26ad340eSHenning Colliander u32 mode; 536*26ad340eSHenning Colliander unsigned long irq; 537*26ad340eSHenning Colliander 538*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 539*26ad340eSHenning Colliander 540*26ad340eSHenning 
Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 541*26ad340eSHenning Colliander if (can->can.ctrlmode & CAN_CTRLMODE_FD) { 542*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; 543*26ad340eSHenning Colliander if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 544*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; 545*26ad340eSHenning Colliander else 546*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 547*26ad340eSHenning Colliander } else { 548*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_CCM; 549*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 550*26ad340eSHenning Colliander } 551*26ad340eSHenning Colliander 552*26ad340eSHenning Colliander if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 553*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_LOM; 554*26ad340eSHenning Colliander 555*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_EEN; 556*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 557*26ad340eSHenning Colliander /* Use ACK packet type */ 558*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; 559*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 560*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 561*26ad340eSHenning Colliander 562*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 563*26ad340eSHenning Colliander } 564*26ad340eSHenning Colliander 565*26ad340eSHenning Colliander static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) 566*26ad340eSHenning Colliander { 567*26ad340eSHenning Colliander u32 status; 568*26ad340eSHenning Colliander unsigned long irq; 569*26ad340eSHenning Colliander 570*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 571*26ad340eSHenning Colliander iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 572*26ad340eSHenning Colliander 
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, 573*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 574*26ad340eSHenning Colliander 575*26ad340eSHenning Colliander status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 576*26ad340eSHenning Colliander if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 577*26ad340eSHenning Colliander u32 cmd; 578*26ad340eSHenning Colliander 579*26ad340eSHenning Colliander /* If controller is already idle, run abort, flush and reset */ 580*26ad340eSHenning Colliander cmd = KVASER_PCIEFD_KCAN_CMD_AT; 581*26ad340eSHenning Colliander cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 582*26ad340eSHenning Colliander iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 583*26ad340eSHenning Colliander } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { 584*26ad340eSHenning Colliander u32 mode; 585*26ad340eSHenning Colliander 586*26ad340eSHenning Colliander /* Put controller in reset mode */ 587*26ad340eSHenning Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 588*26ad340eSHenning Colliander mode |= KVASER_PCIEFD_KCAN_MODE_RM; 589*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 590*26ad340eSHenning Colliander } 591*26ad340eSHenning Colliander 592*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 593*26ad340eSHenning Colliander } 594*26ad340eSHenning Colliander 595*26ad340eSHenning Colliander static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) 596*26ad340eSHenning Colliander { 597*26ad340eSHenning Colliander u32 mode; 598*26ad340eSHenning Colliander unsigned long irq; 599*26ad340eSHenning Colliander 600*26ad340eSHenning Colliander del_timer(&can->bec_poll_timer); 601*26ad340eSHenning Colliander 602*26ad340eSHenning Colliander if (!completion_done(&can->flush_comp)) 603*26ad340eSHenning Colliander kvaser_pciefd_start_controller_flush(can); 604*26ad340eSHenning Colliander 
605*26ad340eSHenning Colliander if (!wait_for_completion_timeout(&can->flush_comp, 606*26ad340eSHenning Colliander KVASER_PCIEFD_WAIT_TIMEOUT)) { 607*26ad340eSHenning Colliander netdev_err(can->can.dev, "Timeout during bus on flush\n"); 608*26ad340eSHenning Colliander return -ETIMEDOUT; 609*26ad340eSHenning Colliander } 610*26ad340eSHenning Colliander 611*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 612*26ad340eSHenning Colliander iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 613*26ad340eSHenning Colliander iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 614*26ad340eSHenning Colliander 615*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, 616*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 617*26ad340eSHenning Colliander 618*26ad340eSHenning Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 619*26ad340eSHenning Colliander mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 620*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 621*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 622*26ad340eSHenning Colliander 623*26ad340eSHenning Colliander if (!wait_for_completion_timeout(&can->start_comp, 624*26ad340eSHenning Colliander KVASER_PCIEFD_WAIT_TIMEOUT)) { 625*26ad340eSHenning Colliander netdev_err(can->can.dev, "Timeout during bus on reset\n"); 626*26ad340eSHenning Colliander return -ETIMEDOUT; 627*26ad340eSHenning Colliander } 628*26ad340eSHenning Colliander /* Reset interrupt handling */ 629*26ad340eSHenning Colliander iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 630*26ad340eSHenning Colliander iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 631*26ad340eSHenning Colliander 632*26ad340eSHenning Colliander kvaser_pciefd_set_tx_irq(can); 633*26ad340eSHenning Colliander kvaser_pciefd_setup_controller(can); 634*26ad340eSHenning Colliander 635*26ad340eSHenning Colliander 
can->can.state = CAN_STATE_ERROR_ACTIVE; 636*26ad340eSHenning Colliander netif_wake_queue(can->can.dev); 637*26ad340eSHenning Colliander can->bec.txerr = 0; 638*26ad340eSHenning Colliander can->bec.rxerr = 0; 639*26ad340eSHenning Colliander can->err_rep_cnt = 0; 640*26ad340eSHenning Colliander 641*26ad340eSHenning Colliander return 0; 642*26ad340eSHenning Colliander } 643*26ad340eSHenning Colliander 644*26ad340eSHenning Colliander static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) 645*26ad340eSHenning Colliander { 646*26ad340eSHenning Colliander int top, trigger; 647*26ad340eSHenning Colliander u32 pwm_ctrl; 648*26ad340eSHenning Colliander unsigned long irq; 649*26ad340eSHenning Colliander 650*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq); 651*26ad340eSHenning Colliander pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 652*26ad340eSHenning Colliander top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; 653*26ad340eSHenning Colliander 654*26ad340eSHenning Colliander trigger = (100 * top + 50) / 100; 655*26ad340eSHenning Colliander if (trigger < 0) 656*26ad340eSHenning Colliander trigger = 0; 657*26ad340eSHenning Colliander 658*26ad340eSHenning Colliander pwm_ctrl = trigger & 0xff; 659*26ad340eSHenning Colliander pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 660*26ad340eSHenning Colliander iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 661*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 662*26ad340eSHenning Colliander } 663*26ad340eSHenning Colliander 664*26ad340eSHenning Colliander static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) 665*26ad340eSHenning Colliander { 666*26ad340eSHenning Colliander int top, trigger; 667*26ad340eSHenning Colliander u32 pwm_ctrl; 668*26ad340eSHenning Colliander unsigned long irq; 669*26ad340eSHenning Colliander 670*26ad340eSHenning Colliander kvaser_pciefd_pwm_stop(can); 671*26ad340eSHenning Colliander 
spin_lock_irqsave(&can->lock, irq); 672*26ad340eSHenning Colliander 673*26ad340eSHenning Colliander /* Set frequency to 500 KHz*/ 674*26ad340eSHenning Colliander top = can->can.clock.freq / (2 * 500000) - 1; 675*26ad340eSHenning Colliander 676*26ad340eSHenning Colliander pwm_ctrl = top & 0xff; 677*26ad340eSHenning Colliander pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 678*26ad340eSHenning Colliander iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 679*26ad340eSHenning Colliander 680*26ad340eSHenning Colliander /* Set duty cycle to 95 */ 681*26ad340eSHenning Colliander trigger = (100 * top - 95 * (top + 1) + 50) / 100; 682*26ad340eSHenning Colliander pwm_ctrl = trigger & 0xff; 683*26ad340eSHenning Colliander pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 684*26ad340eSHenning Colliander iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 685*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq); 686*26ad340eSHenning Colliander } 687*26ad340eSHenning Colliander 688*26ad340eSHenning Colliander static int kvaser_pciefd_open(struct net_device *netdev) 689*26ad340eSHenning Colliander { 690*26ad340eSHenning Colliander int err; 691*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = netdev_priv(netdev); 692*26ad340eSHenning Colliander 693*26ad340eSHenning Colliander err = open_candev(netdev); 694*26ad340eSHenning Colliander if (err) 695*26ad340eSHenning Colliander return err; 696*26ad340eSHenning Colliander 697*26ad340eSHenning Colliander err = kvaser_pciefd_bus_on(can); 698*26ad340eSHenning Colliander if (err) 699*26ad340eSHenning Colliander return err; 700*26ad340eSHenning Colliander 701*26ad340eSHenning Colliander return 0; 702*26ad340eSHenning Colliander } 703*26ad340eSHenning Colliander 704*26ad340eSHenning Colliander static int kvaser_pciefd_stop(struct net_device *netdev) 705*26ad340eSHenning Colliander { 706*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = 
netdev_priv(netdev); 707*26ad340eSHenning Colliander int ret = 0; 708*26ad340eSHenning Colliander 709*26ad340eSHenning Colliander /* Don't interrupt ongoing flush */ 710*26ad340eSHenning Colliander if (!completion_done(&can->flush_comp)) 711*26ad340eSHenning Colliander kvaser_pciefd_start_controller_flush(can); 712*26ad340eSHenning Colliander 713*26ad340eSHenning Colliander if (!wait_for_completion_timeout(&can->flush_comp, 714*26ad340eSHenning Colliander KVASER_PCIEFD_WAIT_TIMEOUT)) { 715*26ad340eSHenning Colliander netdev_err(can->can.dev, "Timeout during stop\n"); 716*26ad340eSHenning Colliander ret = -ETIMEDOUT; 717*26ad340eSHenning Colliander } else { 718*26ad340eSHenning Colliander iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 719*26ad340eSHenning Colliander del_timer(&can->bec_poll_timer); 720*26ad340eSHenning Colliander } 721*26ad340eSHenning Colliander close_candev(netdev); 722*26ad340eSHenning Colliander 723*26ad340eSHenning Colliander return ret; 724*26ad340eSHenning Colliander } 725*26ad340eSHenning Colliander 726*26ad340eSHenning Colliander static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, 727*26ad340eSHenning Colliander struct kvaser_pciefd_can *can, 728*26ad340eSHenning Colliander struct sk_buff *skb) 729*26ad340eSHenning Colliander { 730*26ad340eSHenning Colliander struct canfd_frame *cf = (struct canfd_frame *)skb->data; 731*26ad340eSHenning Colliander int packet_size; 732*26ad340eSHenning Colliander int seq = can->echo_idx; 733*26ad340eSHenning Colliander 734*26ad340eSHenning Colliander memset(p, 0, sizeof(*p)); 735*26ad340eSHenning Colliander 736*26ad340eSHenning Colliander if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 737*26ad340eSHenning Colliander p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; 738*26ad340eSHenning Colliander 739*26ad340eSHenning Colliander if (cf->can_id & CAN_RTR_FLAG) 740*26ad340eSHenning Colliander p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; 741*26ad340eSHenning Colliander 
742*26ad340eSHenning Colliander if (cf->can_id & CAN_EFF_FLAG) 743*26ad340eSHenning Colliander p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; 744*26ad340eSHenning Colliander 745*26ad340eSHenning Colliander p->header[0] |= cf->can_id & CAN_EFF_MASK; 746*26ad340eSHenning Colliander p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; 747*26ad340eSHenning Colliander p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; 748*26ad340eSHenning Colliander 749*26ad340eSHenning Colliander if (can_is_canfd_skb(skb)) { 750*26ad340eSHenning Colliander p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; 751*26ad340eSHenning Colliander if (cf->flags & CANFD_BRS) 752*26ad340eSHenning Colliander p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; 753*26ad340eSHenning Colliander if (cf->flags & CANFD_ESI) 754*26ad340eSHenning Colliander p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; 755*26ad340eSHenning Colliander } 756*26ad340eSHenning Colliander 757*26ad340eSHenning Colliander p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; 758*26ad340eSHenning Colliander 759*26ad340eSHenning Colliander packet_size = cf->len; 760*26ad340eSHenning Colliander memcpy(p->data, cf->data, packet_size); 761*26ad340eSHenning Colliander 762*26ad340eSHenning Colliander return DIV_ROUND_UP(packet_size, 4); 763*26ad340eSHenning Colliander } 764*26ad340eSHenning Colliander 765*26ad340eSHenning Colliander static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, 766*26ad340eSHenning Colliander struct net_device *netdev) 767*26ad340eSHenning Colliander { 768*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = netdev_priv(netdev); 769*26ad340eSHenning Colliander unsigned long irq_flags; 770*26ad340eSHenning Colliander struct kvaser_pciefd_tx_packet packet; 771*26ad340eSHenning Colliander int nwords; 772*26ad340eSHenning Colliander u8 count; 773*26ad340eSHenning Colliander 774*26ad340eSHenning Colliander if (can_dropped_invalid_skb(netdev, skb)) 775*26ad340eSHenning Colliander return NETDEV_TX_OK; 
776*26ad340eSHenning Colliander 777*26ad340eSHenning Colliander nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); 778*26ad340eSHenning Colliander 779*26ad340eSHenning Colliander spin_lock_irqsave(&can->echo_lock, irq_flags); 780*26ad340eSHenning Colliander 781*26ad340eSHenning Colliander /* Prepare and save echo skb in internal slot */ 782*26ad340eSHenning Colliander can_put_echo_skb(skb, netdev, can->echo_idx); 783*26ad340eSHenning Colliander 784*26ad340eSHenning Colliander /* Move echo index to the next slot */ 785*26ad340eSHenning Colliander can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; 786*26ad340eSHenning Colliander 787*26ad340eSHenning Colliander /* Write header to fifo */ 788*26ad340eSHenning Colliander iowrite32(packet.header[0], 789*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 790*26ad340eSHenning Colliander iowrite32(packet.header[1], 791*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 792*26ad340eSHenning Colliander 793*26ad340eSHenning Colliander if (nwords) { 794*26ad340eSHenning Colliander u32 data_last = ((u32 *)packet.data)[nwords - 1]; 795*26ad340eSHenning Colliander 796*26ad340eSHenning Colliander /* Write data to fifo, except last word */ 797*26ad340eSHenning Colliander iowrite32_rep(can->reg_base + 798*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, 799*26ad340eSHenning Colliander nwords - 1); 800*26ad340eSHenning Colliander /* Write last word to end of fifo */ 801*26ad340eSHenning Colliander __raw_writel(data_last, can->reg_base + 802*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 803*26ad340eSHenning Colliander } else { 804*26ad340eSHenning Colliander /* Complete write to fifo */ 805*26ad340eSHenning Colliander __raw_writel(0, can->reg_base + 806*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 807*26ad340eSHenning Colliander } 808*26ad340eSHenning Colliander 809*26ad340eSHenning Colliander count = 
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); 810*26ad340eSHenning Colliander /* No room for a new message, stop the queue until at least one 811*26ad340eSHenning Colliander * successful transmit 812*26ad340eSHenning Colliander */ 813*26ad340eSHenning Colliander if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || 814*26ad340eSHenning Colliander can->can.echo_skb[can->echo_idx]) 815*26ad340eSHenning Colliander netif_stop_queue(netdev); 816*26ad340eSHenning Colliander 817*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->echo_lock, irq_flags); 818*26ad340eSHenning Colliander 819*26ad340eSHenning Colliander return NETDEV_TX_OK; 820*26ad340eSHenning Colliander } 821*26ad340eSHenning Colliander 822*26ad340eSHenning Colliander static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) 823*26ad340eSHenning Colliander { 824*26ad340eSHenning Colliander u32 mode, test, btrn; 825*26ad340eSHenning Colliander unsigned long irq_flags; 826*26ad340eSHenning Colliander int ret; 827*26ad340eSHenning Colliander struct can_bittiming *bt; 828*26ad340eSHenning Colliander 829*26ad340eSHenning Colliander if (data) 830*26ad340eSHenning Colliander bt = &can->can.data_bittiming; 831*26ad340eSHenning Colliander else 832*26ad340eSHenning Colliander bt = &can->can.bittiming; 833*26ad340eSHenning Colliander 834*26ad340eSHenning Colliander btrn = ((bt->phase_seg2 - 1) & 0x1f) << 835*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | 836*26ad340eSHenning Colliander (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << 837*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | 838*26ad340eSHenning Colliander ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT | 839*26ad340eSHenning Colliander ((bt->brp - 1) & 0x1fff); 840*26ad340eSHenning Colliander 841*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq_flags); 842*26ad340eSHenning Colliander mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 
843*26ad340eSHenning Colliander 844*26ad340eSHenning Colliander /* Put the circuit in reset mode */ 845*26ad340eSHenning Colliander iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, 846*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 847*26ad340eSHenning Colliander 848*26ad340eSHenning Colliander /* Can only set bittiming if in reset mode */ 849*26ad340eSHenning Colliander ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, 850*26ad340eSHenning Colliander test, test & KVASER_PCIEFD_KCAN_MODE_RM, 851*26ad340eSHenning Colliander 0, 10); 852*26ad340eSHenning Colliander 853*26ad340eSHenning Colliander if (ret) { 854*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq_flags); 855*26ad340eSHenning Colliander return -EBUSY; 856*26ad340eSHenning Colliander } 857*26ad340eSHenning Colliander 858*26ad340eSHenning Colliander if (data) 859*26ad340eSHenning Colliander iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); 860*26ad340eSHenning Colliander else 861*26ad340eSHenning Colliander iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); 862*26ad340eSHenning Colliander 863*26ad340eSHenning Colliander /* Restore previous reset mode status */ 864*26ad340eSHenning Colliander iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 865*26ad340eSHenning Colliander 866*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq_flags); 867*26ad340eSHenning Colliander return 0; 868*26ad340eSHenning Colliander } 869*26ad340eSHenning Colliander 870*26ad340eSHenning Colliander static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) 871*26ad340eSHenning Colliander { 872*26ad340eSHenning Colliander return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); 873*26ad340eSHenning Colliander } 874*26ad340eSHenning Colliander 875*26ad340eSHenning Colliander static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) 876*26ad340eSHenning Colliander { 877*26ad340eSHenning Colliander 
return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); 878*26ad340eSHenning Colliander } 879*26ad340eSHenning Colliander 880*26ad340eSHenning Colliander static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) 881*26ad340eSHenning Colliander { 882*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = netdev_priv(ndev); 883*26ad340eSHenning Colliander int ret = 0; 884*26ad340eSHenning Colliander 885*26ad340eSHenning Colliander switch (mode) { 886*26ad340eSHenning Colliander case CAN_MODE_START: 887*26ad340eSHenning Colliander if (!can->can.restart_ms) 888*26ad340eSHenning Colliander ret = kvaser_pciefd_bus_on(can); 889*26ad340eSHenning Colliander break; 890*26ad340eSHenning Colliander default: 891*26ad340eSHenning Colliander return -EOPNOTSUPP; 892*26ad340eSHenning Colliander } 893*26ad340eSHenning Colliander 894*26ad340eSHenning Colliander return ret; 895*26ad340eSHenning Colliander } 896*26ad340eSHenning Colliander 897*26ad340eSHenning Colliander static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, 898*26ad340eSHenning Colliander struct can_berr_counter *bec) 899*26ad340eSHenning Colliander { 900*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = netdev_priv(ndev); 901*26ad340eSHenning Colliander 902*26ad340eSHenning Colliander bec->rxerr = can->bec.rxerr; 903*26ad340eSHenning Colliander bec->txerr = can->bec.txerr; 904*26ad340eSHenning Colliander return 0; 905*26ad340eSHenning Colliander } 906*26ad340eSHenning Colliander 907*26ad340eSHenning Colliander static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) 908*26ad340eSHenning Colliander { 909*26ad340eSHenning Colliander struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); 910*26ad340eSHenning Colliander 911*26ad340eSHenning Colliander kvaser_pciefd_enable_err_gen(can); 912*26ad340eSHenning Colliander kvaser_pciefd_request_status(can); 913*26ad340eSHenning Colliander can->err_rep_cnt = 0; 914*26ad340eSHenning Colliander } 
915*26ad340eSHenning Colliander 916*26ad340eSHenning Colliander static const struct net_device_ops kvaser_pciefd_netdev_ops = { 917*26ad340eSHenning Colliander .ndo_open = kvaser_pciefd_open, 918*26ad340eSHenning Colliander .ndo_stop = kvaser_pciefd_stop, 919*26ad340eSHenning Colliander .ndo_start_xmit = kvaser_pciefd_start_xmit, 920*26ad340eSHenning Colliander .ndo_change_mtu = can_change_mtu, 921*26ad340eSHenning Colliander }; 922*26ad340eSHenning Colliander 923*26ad340eSHenning Colliander static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) 924*26ad340eSHenning Colliander { 925*26ad340eSHenning Colliander int i; 926*26ad340eSHenning Colliander 927*26ad340eSHenning Colliander for (i = 0; i < pcie->nr_channels; i++) { 928*26ad340eSHenning Colliander struct net_device *netdev; 929*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 930*26ad340eSHenning Colliander u32 status, tx_npackets; 931*26ad340eSHenning Colliander 932*26ad340eSHenning Colliander netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), 933*26ad340eSHenning Colliander KVASER_PCIEFD_CAN_TX_MAX_COUNT); 934*26ad340eSHenning Colliander if (!netdev) 935*26ad340eSHenning Colliander return -ENOMEM; 936*26ad340eSHenning Colliander 937*26ad340eSHenning Colliander can = netdev_priv(netdev); 938*26ad340eSHenning Colliander netdev->netdev_ops = &kvaser_pciefd_netdev_ops; 939*26ad340eSHenning Colliander can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + 940*26ad340eSHenning Colliander i * KVASER_PCIEFD_KCAN_BASE_OFFSET; 941*26ad340eSHenning Colliander 942*26ad340eSHenning Colliander can->kv_pcie = pcie; 943*26ad340eSHenning Colliander can->cmd_seq = 0; 944*26ad340eSHenning Colliander can->err_rep_cnt = 0; 945*26ad340eSHenning Colliander can->bec.txerr = 0; 946*26ad340eSHenning Colliander can->bec.rxerr = 0; 947*26ad340eSHenning Colliander 948*26ad340eSHenning Colliander init_completion(&can->start_comp); 949*26ad340eSHenning Colliander init_completion(&can->flush_comp); 
950*26ad340eSHenning Colliander timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 951*26ad340eSHenning Colliander 0); 952*26ad340eSHenning Colliander 953*26ad340eSHenning Colliander tx_npackets = ioread32(can->reg_base + 954*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); 955*26ad340eSHenning Colliander if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & 956*26ad340eSHenning Colliander 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) { 957*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 958*26ad340eSHenning Colliander "Max Tx count is smaller than expected\n"); 959*26ad340eSHenning Colliander 960*26ad340eSHenning Colliander free_candev(netdev); 961*26ad340eSHenning Colliander return -ENODEV; 962*26ad340eSHenning Colliander } 963*26ad340eSHenning Colliander 964*26ad340eSHenning Colliander can->can.clock.freq = pcie->freq; 965*26ad340eSHenning Colliander can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT; 966*26ad340eSHenning Colliander can->echo_idx = 0; 967*26ad340eSHenning Colliander spin_lock_init(&can->echo_lock); 968*26ad340eSHenning Colliander spin_lock_init(&can->lock); 969*26ad340eSHenning Colliander can->can.bittiming_const = &kvaser_pciefd_bittiming_const; 970*26ad340eSHenning Colliander can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; 971*26ad340eSHenning Colliander 972*26ad340eSHenning Colliander can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; 973*26ad340eSHenning Colliander can->can.do_set_data_bittiming = 974*26ad340eSHenning Colliander kvaser_pciefd_set_data_bittiming; 975*26ad340eSHenning Colliander 976*26ad340eSHenning Colliander can->can.do_set_mode = kvaser_pciefd_set_mode; 977*26ad340eSHenning Colliander can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; 978*26ad340eSHenning Colliander 979*26ad340eSHenning Colliander can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | 980*26ad340eSHenning Colliander CAN_CTRLMODE_FD | 981*26ad340eSHenning Colliander 
CAN_CTRLMODE_FD_NON_ISO; 982*26ad340eSHenning Colliander 983*26ad340eSHenning Colliander status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 984*26ad340eSHenning Colliander if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { 985*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 986*26ad340eSHenning Colliander "CAN FD not supported as expected %d\n", i); 987*26ad340eSHenning Colliander 988*26ad340eSHenning Colliander free_candev(netdev); 989*26ad340eSHenning Colliander return -ENODEV; 990*26ad340eSHenning Colliander } 991*26ad340eSHenning Colliander 992*26ad340eSHenning Colliander if (status & KVASER_PCIEFD_KCAN_STAT_CAP) 993*26ad340eSHenning Colliander can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; 994*26ad340eSHenning Colliander 995*26ad340eSHenning Colliander netdev->flags |= IFF_ECHO; 996*26ad340eSHenning Colliander 997*26ad340eSHenning Colliander SET_NETDEV_DEV(netdev, &pcie->pci->dev); 998*26ad340eSHenning Colliander 999*26ad340eSHenning Colliander iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1000*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | 1001*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_IRQ_TFD, 1002*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1003*26ad340eSHenning Colliander 1004*26ad340eSHenning Colliander pcie->can[i] = can; 1005*26ad340eSHenning Colliander kvaser_pciefd_pwm_start(can); 1006*26ad340eSHenning Colliander } 1007*26ad340eSHenning Colliander 1008*26ad340eSHenning Colliander return 0; 1009*26ad340eSHenning Colliander } 1010*26ad340eSHenning Colliander 1011*26ad340eSHenning Colliander static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) 1012*26ad340eSHenning Colliander { 1013*26ad340eSHenning Colliander int i; 1014*26ad340eSHenning Colliander 1015*26ad340eSHenning Colliander for (i = 0; i < pcie->nr_channels; i++) { 1016*26ad340eSHenning Colliander int err = register_candev(pcie->can[i]->can.dev); 1017*26ad340eSHenning Colliander 1018*26ad340eSHenning 
Colliander if (err) { 1019*26ad340eSHenning Colliander int j; 1020*26ad340eSHenning Colliander 1021*26ad340eSHenning Colliander /* Unregister all successfully registered devices. */ 1022*26ad340eSHenning Colliander for (j = 0; j < i; j++) 1023*26ad340eSHenning Colliander unregister_candev(pcie->can[j]->can.dev); 1024*26ad340eSHenning Colliander return err; 1025*26ad340eSHenning Colliander } 1026*26ad340eSHenning Colliander } 1027*26ad340eSHenning Colliander 1028*26ad340eSHenning Colliander return 0; 1029*26ad340eSHenning Colliander } 1030*26ad340eSHenning Colliander 1031*26ad340eSHenning Colliander static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, 1032*26ad340eSHenning Colliander dma_addr_t addr, int offset) 1033*26ad340eSHenning Colliander { 1034*26ad340eSHenning Colliander u32 word1, word2; 1035*26ad340eSHenning Colliander 1036*26ad340eSHenning Colliander #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1037*26ad340eSHenning Colliander word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; 1038*26ad340eSHenning Colliander word2 = addr >> 32; 1039*26ad340eSHenning Colliander #else 1040*26ad340eSHenning Colliander word1 = addr; 1041*26ad340eSHenning Colliander word2 = 0; 1042*26ad340eSHenning Colliander #endif 1043*26ad340eSHenning Colliander iowrite32(word1, pcie->reg_base + offset); 1044*26ad340eSHenning Colliander iowrite32(word2, pcie->reg_base + offset + 4); 1045*26ad340eSHenning Colliander } 1046*26ad340eSHenning Colliander 1047*26ad340eSHenning Colliander static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) 1048*26ad340eSHenning Colliander { 1049*26ad340eSHenning Colliander int i; 1050*26ad340eSHenning Colliander u32 srb_status; 1051*26ad340eSHenning Colliander dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; 1052*26ad340eSHenning Colliander 1053*26ad340eSHenning Colliander /* Disable the DMA */ 1054*26ad340eSHenning Colliander iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1055*26ad340eSHenning Colliander for (i = 0; i < 
KVASER_PCIEFD_DMA_COUNT; i++) { 1056*26ad340eSHenning Colliander unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; 1057*26ad340eSHenning Colliander 1058*26ad340eSHenning Colliander pcie->dma_data[i] = 1059*26ad340eSHenning Colliander dmam_alloc_coherent(&pcie->pci->dev, 1060*26ad340eSHenning Colliander KVASER_PCIEFD_DMA_SIZE, 1061*26ad340eSHenning Colliander &dma_addr[i], 1062*26ad340eSHenning Colliander GFP_KERNEL); 1063*26ad340eSHenning Colliander 1064*26ad340eSHenning Colliander if (!pcie->dma_data[i] || !dma_addr[i]) { 1065*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", 1066*26ad340eSHenning Colliander KVASER_PCIEFD_DMA_SIZE); 1067*26ad340eSHenning Colliander return -ENOMEM; 1068*26ad340eSHenning Colliander } 1069*26ad340eSHenning Colliander 1070*26ad340eSHenning Colliander kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); 1071*26ad340eSHenning Colliander } 1072*26ad340eSHenning Colliander 1073*26ad340eSHenning Colliander /* Reset Rx FIFO, and both DMA buffers */ 1074*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | 1075*26ad340eSHenning Colliander KVASER_PCIEFD_SRB_CMD_RDB1, 1076*26ad340eSHenning Colliander pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1077*26ad340eSHenning Colliander 1078*26ad340eSHenning Colliander srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); 1079*26ad340eSHenning Colliander if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { 1080*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); 1081*26ad340eSHenning Colliander return -EIO; 1082*26ad340eSHenning Colliander } 1083*26ad340eSHenning Colliander 1084*26ad340eSHenning Colliander /* Enable the DMA */ 1085*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, 1086*26ad340eSHenning Colliander pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1087*26ad340eSHenning Colliander 1088*26ad340eSHenning Colliander return 0; 
1089*26ad340eSHenning Colliander } 1090*26ad340eSHenning Colliander 1091*26ad340eSHenning Colliander static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) 1092*26ad340eSHenning Colliander { 1093*26ad340eSHenning Colliander u32 sysid, srb_status, build; 1094*26ad340eSHenning Colliander u8 sysid_nr_chan; 1095*26ad340eSHenning Colliander int ret; 1096*26ad340eSHenning Colliander 1097*26ad340eSHenning Colliander ret = kvaser_pciefd_read_cfg(pcie); 1098*26ad340eSHenning Colliander if (ret) 1099*26ad340eSHenning Colliander return ret; 1100*26ad340eSHenning Colliander 1101*26ad340eSHenning Colliander sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG); 1102*26ad340eSHenning Colliander sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff; 1103*26ad340eSHenning Colliander if (pcie->nr_channels != sysid_nr_chan) { 1104*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 1105*26ad340eSHenning Colliander "Number of channels does not match: %u vs %u\n", 1106*26ad340eSHenning Colliander pcie->nr_channels, 1107*26ad340eSHenning Colliander sysid_nr_chan); 1108*26ad340eSHenning Colliander return -ENODEV; 1109*26ad340eSHenning Colliander } 1110*26ad340eSHenning Colliander 1111*26ad340eSHenning Colliander if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS) 1112*26ad340eSHenning Colliander pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS; 1113*26ad340eSHenning Colliander 1114*26ad340eSHenning Colliander build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); 1115*26ad340eSHenning Colliander dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n", 1116*26ad340eSHenning Colliander (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff, 1117*26ad340eSHenning Colliander sysid & 0xff, 1118*26ad340eSHenning Colliander (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff); 1119*26ad340eSHenning Colliander 1120*26ad340eSHenning Colliander srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); 1121*26ad340eSHenning Colliander if 
(!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { 1122*26ad340eSHenning Colliander dev_err(&pcie->pci->dev, 1123*26ad340eSHenning Colliander "Hardware without DMA is not supported\n"); 1124*26ad340eSHenning Colliander return -ENODEV; 1125*26ad340eSHenning Colliander } 1126*26ad340eSHenning Colliander 1127*26ad340eSHenning Colliander pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG); 1128*26ad340eSHenning Colliander pcie->freq_to_ticks_div = pcie->freq / 1000000; 1129*26ad340eSHenning Colliander if (pcie->freq_to_ticks_div == 0) 1130*26ad340eSHenning Colliander pcie->freq_to_ticks_div = 1; 1131*26ad340eSHenning Colliander 1132*26ad340eSHenning Colliander /* Turn off all loopback functionality */ 1133*26ad340eSHenning Colliander iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); 1134*26ad340eSHenning Colliander return ret; 1135*26ad340eSHenning Colliander } 1136*26ad340eSHenning Colliander 1137*26ad340eSHenning Colliander static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, 1138*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p, 1139*26ad340eSHenning Colliander __le32 *data) 1140*26ad340eSHenning Colliander { 1141*26ad340eSHenning Colliander struct sk_buff *skb; 1142*26ad340eSHenning Colliander struct canfd_frame *cf; 1143*26ad340eSHenning Colliander struct can_priv *priv; 1144*26ad340eSHenning Colliander struct net_device_stats *stats; 1145*26ad340eSHenning Colliander struct skb_shared_hwtstamps *shhwtstamps; 1146*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1147*26ad340eSHenning Colliander 1148*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1149*26ad340eSHenning Colliander return -EIO; 1150*26ad340eSHenning Colliander 1151*26ad340eSHenning Colliander priv = &pcie->can[ch_id]->can; 1152*26ad340eSHenning Colliander stats = &priv->dev->stats; 1153*26ad340eSHenning Colliander 1154*26ad340eSHenning Colliander if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) 
{ 1155*26ad340eSHenning Colliander skb = alloc_canfd_skb(priv->dev, &cf); 1156*26ad340eSHenning Colliander if (!skb) { 1157*26ad340eSHenning Colliander stats->rx_dropped++; 1158*26ad340eSHenning Colliander return -ENOMEM; 1159*26ad340eSHenning Colliander } 1160*26ad340eSHenning Colliander 1161*26ad340eSHenning Colliander if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) 1162*26ad340eSHenning Colliander cf->flags |= CANFD_BRS; 1163*26ad340eSHenning Colliander 1164*26ad340eSHenning Colliander if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) 1165*26ad340eSHenning Colliander cf->flags |= CANFD_ESI; 1166*26ad340eSHenning Colliander } else { 1167*26ad340eSHenning Colliander skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); 1168*26ad340eSHenning Colliander if (!skb) { 1169*26ad340eSHenning Colliander stats->rx_dropped++; 1170*26ad340eSHenning Colliander return -ENOMEM; 1171*26ad340eSHenning Colliander } 1172*26ad340eSHenning Colliander } 1173*26ad340eSHenning Colliander 1174*26ad340eSHenning Colliander cf->can_id = p->header[0] & CAN_EFF_MASK; 1175*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) 1176*26ad340eSHenning Colliander cf->can_id |= CAN_EFF_FLAG; 1177*26ad340eSHenning Colliander 1178*26ad340eSHenning Colliander cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); 1179*26ad340eSHenning Colliander 1180*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) 1181*26ad340eSHenning Colliander cf->can_id |= CAN_RTR_FLAG; 1182*26ad340eSHenning Colliander else 1183*26ad340eSHenning Colliander memcpy(cf->data, data, cf->len); 1184*26ad340eSHenning Colliander 1185*26ad340eSHenning Colliander shhwtstamps = skb_hwtstamps(skb); 1186*26ad340eSHenning Colliander 1187*26ad340eSHenning Colliander shhwtstamps->hwtstamp = 1188*26ad340eSHenning Colliander ns_to_ktime(div_u64(p->timestamp * 1000, 1189*26ad340eSHenning Colliander pcie->freq_to_ticks_div)); 1190*26ad340eSHenning Colliander 1191*26ad340eSHenning Colliander 
stats->rx_bytes += cf->len; 1192*26ad340eSHenning Colliander stats->rx_packets++; 1193*26ad340eSHenning Colliander 1194*26ad340eSHenning Colliander return netif_rx(skb); 1195*26ad340eSHenning Colliander } 1196*26ad340eSHenning Colliander 1197*26ad340eSHenning Colliander static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, 1198*26ad340eSHenning Colliander struct can_frame *cf, 1199*26ad340eSHenning Colliander enum can_state new_state, 1200*26ad340eSHenning Colliander enum can_state tx_state, 1201*26ad340eSHenning Colliander enum can_state rx_state) 1202*26ad340eSHenning Colliander { 1203*26ad340eSHenning Colliander can_change_state(can->can.dev, cf, tx_state, rx_state); 1204*26ad340eSHenning Colliander 1205*26ad340eSHenning Colliander if (new_state == CAN_STATE_BUS_OFF) { 1206*26ad340eSHenning Colliander struct net_device *ndev = can->can.dev; 1207*26ad340eSHenning Colliander unsigned long irq_flags; 1208*26ad340eSHenning Colliander 1209*26ad340eSHenning Colliander spin_lock_irqsave(&can->lock, irq_flags); 1210*26ad340eSHenning Colliander netif_stop_queue(can->can.dev); 1211*26ad340eSHenning Colliander spin_unlock_irqrestore(&can->lock, irq_flags); 1212*26ad340eSHenning Colliander 1213*26ad340eSHenning Colliander /* Prevent CAN controller from auto recover from bus off */ 1214*26ad340eSHenning Colliander if (!can->can.restart_ms) { 1215*26ad340eSHenning Colliander kvaser_pciefd_start_controller_flush(can); 1216*26ad340eSHenning Colliander can_bus_off(ndev); 1217*26ad340eSHenning Colliander } 1218*26ad340eSHenning Colliander } 1219*26ad340eSHenning Colliander } 1220*26ad340eSHenning Colliander 1221*26ad340eSHenning Colliander static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, 1222*26ad340eSHenning Colliander struct can_berr_counter *bec, 1223*26ad340eSHenning Colliander enum can_state *new_state, 1224*26ad340eSHenning Colliander enum can_state *tx_state, 1225*26ad340eSHenning Colliander enum can_state *rx_state) 
1226*26ad340eSHenning Colliander { 1227*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || 1228*26ad340eSHenning Colliander p->header[0] & KVASER_PCIEFD_SPACK_IRM) 1229*26ad340eSHenning Colliander *new_state = CAN_STATE_BUS_OFF; 1230*26ad340eSHenning Colliander else if (bec->txerr >= 255 || bec->rxerr >= 255) 1231*26ad340eSHenning Colliander *new_state = CAN_STATE_BUS_OFF; 1232*26ad340eSHenning Colliander else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) 1233*26ad340eSHenning Colliander *new_state = CAN_STATE_ERROR_PASSIVE; 1234*26ad340eSHenning Colliander else if (bec->txerr >= 128 || bec->rxerr >= 128) 1235*26ad340eSHenning Colliander *new_state = CAN_STATE_ERROR_PASSIVE; 1236*26ad340eSHenning Colliander else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) 1237*26ad340eSHenning Colliander *new_state = CAN_STATE_ERROR_WARNING; 1238*26ad340eSHenning Colliander else if (bec->txerr >= 96 || bec->rxerr >= 96) 1239*26ad340eSHenning Colliander *new_state = CAN_STATE_ERROR_WARNING; 1240*26ad340eSHenning Colliander else 1241*26ad340eSHenning Colliander *new_state = CAN_STATE_ERROR_ACTIVE; 1242*26ad340eSHenning Colliander 1243*26ad340eSHenning Colliander *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; 1244*26ad340eSHenning Colliander *rx_state = bec->txerr <= bec->rxerr ? 
*new_state : 0; 1245*26ad340eSHenning Colliander } 1246*26ad340eSHenning Colliander 1247*26ad340eSHenning Colliander static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, 1248*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1249*26ad340eSHenning Colliander { 1250*26ad340eSHenning Colliander struct can_berr_counter bec; 1251*26ad340eSHenning Colliander enum can_state old_state, new_state, tx_state, rx_state; 1252*26ad340eSHenning Colliander struct net_device *ndev = can->can.dev; 1253*26ad340eSHenning Colliander struct sk_buff *skb; 1254*26ad340eSHenning Colliander struct can_frame *cf = NULL; 1255*26ad340eSHenning Colliander struct skb_shared_hwtstamps *shhwtstamps; 1256*26ad340eSHenning Colliander struct net_device_stats *stats = &ndev->stats; 1257*26ad340eSHenning Colliander 1258*26ad340eSHenning Colliander old_state = can->can.state; 1259*26ad340eSHenning Colliander 1260*26ad340eSHenning Colliander bec.txerr = p->header[0] & 0xff; 1261*26ad340eSHenning Colliander bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; 1262*26ad340eSHenning Colliander 1263*26ad340eSHenning Colliander kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, 1264*26ad340eSHenning Colliander &rx_state); 1265*26ad340eSHenning Colliander 1266*26ad340eSHenning Colliander skb = alloc_can_err_skb(ndev, &cf); 1267*26ad340eSHenning Colliander 1268*26ad340eSHenning Colliander if (new_state != old_state) { 1269*26ad340eSHenning Colliander kvaser_pciefd_change_state(can, cf, new_state, tx_state, 1270*26ad340eSHenning Colliander rx_state); 1271*26ad340eSHenning Colliander 1272*26ad340eSHenning Colliander if (old_state == CAN_STATE_BUS_OFF && 1273*26ad340eSHenning Colliander new_state == CAN_STATE_ERROR_ACTIVE && 1274*26ad340eSHenning Colliander can->can.restart_ms) { 1275*26ad340eSHenning Colliander can->can.can_stats.restarts++; 1276*26ad340eSHenning Colliander if (skb) 1277*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_RESTARTED; 
1278*26ad340eSHenning Colliander } 1279*26ad340eSHenning Colliander } 1280*26ad340eSHenning Colliander 1281*26ad340eSHenning Colliander can->err_rep_cnt++; 1282*26ad340eSHenning Colliander can->can.can_stats.bus_error++; 1283*26ad340eSHenning Colliander stats->rx_errors++; 1284*26ad340eSHenning Colliander 1285*26ad340eSHenning Colliander can->bec.txerr = bec.txerr; 1286*26ad340eSHenning Colliander can->bec.rxerr = bec.rxerr; 1287*26ad340eSHenning Colliander 1288*26ad340eSHenning Colliander if (!skb) { 1289*26ad340eSHenning Colliander stats->rx_dropped++; 1290*26ad340eSHenning Colliander return -ENOMEM; 1291*26ad340eSHenning Colliander } 1292*26ad340eSHenning Colliander 1293*26ad340eSHenning Colliander shhwtstamps = skb_hwtstamps(skb); 1294*26ad340eSHenning Colliander shhwtstamps->hwtstamp = 1295*26ad340eSHenning Colliander ns_to_ktime(div_u64(p->timestamp * 1000, 1296*26ad340eSHenning Colliander can->kv_pcie->freq_to_ticks_div)); 1297*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_BUSERROR; 1298*26ad340eSHenning Colliander 1299*26ad340eSHenning Colliander cf->data[6] = bec.txerr; 1300*26ad340eSHenning Colliander cf->data[7] = bec.rxerr; 1301*26ad340eSHenning Colliander 1302*26ad340eSHenning Colliander stats->rx_packets++; 1303*26ad340eSHenning Colliander stats->rx_bytes += cf->can_dlc; 1304*26ad340eSHenning Colliander 1305*26ad340eSHenning Colliander netif_rx(skb); 1306*26ad340eSHenning Colliander return 0; 1307*26ad340eSHenning Colliander } 1308*26ad340eSHenning Colliander 1309*26ad340eSHenning Colliander static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, 1310*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1311*26ad340eSHenning Colliander { 1312*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1313*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1314*26ad340eSHenning Colliander 1315*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1316*26ad340eSHenning 
Colliander return -EIO; 1317*26ad340eSHenning Colliander 1318*26ad340eSHenning Colliander can = pcie->can[ch_id]; 1319*26ad340eSHenning Colliander 1320*26ad340eSHenning Colliander kvaser_pciefd_rx_error_frame(can, p); 1321*26ad340eSHenning Colliander if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) 1322*26ad340eSHenning Colliander /* Do not report more errors, until bec_poll_timer expires */ 1323*26ad340eSHenning Colliander kvaser_pciefd_disable_err_gen(can); 1324*26ad340eSHenning Colliander /* Start polling the error counters */ 1325*26ad340eSHenning Colliander mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); 1326*26ad340eSHenning Colliander return 0; 1327*26ad340eSHenning Colliander } 1328*26ad340eSHenning Colliander 1329*26ad340eSHenning Colliander static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, 1330*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1331*26ad340eSHenning Colliander { 1332*26ad340eSHenning Colliander struct can_berr_counter bec; 1333*26ad340eSHenning Colliander enum can_state old_state, new_state, tx_state, rx_state; 1334*26ad340eSHenning Colliander 1335*26ad340eSHenning Colliander old_state = can->can.state; 1336*26ad340eSHenning Colliander 1337*26ad340eSHenning Colliander bec.txerr = p->header[0] & 0xff; 1338*26ad340eSHenning Colliander bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; 1339*26ad340eSHenning Colliander 1340*26ad340eSHenning Colliander kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, 1341*26ad340eSHenning Colliander &rx_state); 1342*26ad340eSHenning Colliander 1343*26ad340eSHenning Colliander if (new_state != old_state) { 1344*26ad340eSHenning Colliander struct net_device *ndev = can->can.dev; 1345*26ad340eSHenning Colliander struct sk_buff *skb; 1346*26ad340eSHenning Colliander struct can_frame *cf; 1347*26ad340eSHenning Colliander struct skb_shared_hwtstamps *shhwtstamps; 1348*26ad340eSHenning Colliander 1349*26ad340eSHenning Colliander skb 
= alloc_can_err_skb(ndev, &cf); 1350*26ad340eSHenning Colliander if (!skb) { 1351*26ad340eSHenning Colliander struct net_device_stats *stats = &ndev->stats; 1352*26ad340eSHenning Colliander 1353*26ad340eSHenning Colliander stats->rx_dropped++; 1354*26ad340eSHenning Colliander return -ENOMEM; 1355*26ad340eSHenning Colliander } 1356*26ad340eSHenning Colliander 1357*26ad340eSHenning Colliander kvaser_pciefd_change_state(can, cf, new_state, tx_state, 1358*26ad340eSHenning Colliander rx_state); 1359*26ad340eSHenning Colliander 1360*26ad340eSHenning Colliander if (old_state == CAN_STATE_BUS_OFF && 1361*26ad340eSHenning Colliander new_state == CAN_STATE_ERROR_ACTIVE && 1362*26ad340eSHenning Colliander can->can.restart_ms) { 1363*26ad340eSHenning Colliander can->can.can_stats.restarts++; 1364*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_RESTARTED; 1365*26ad340eSHenning Colliander } 1366*26ad340eSHenning Colliander 1367*26ad340eSHenning Colliander shhwtstamps = skb_hwtstamps(skb); 1368*26ad340eSHenning Colliander shhwtstamps->hwtstamp = 1369*26ad340eSHenning Colliander ns_to_ktime(div_u64(p->timestamp * 1000, 1370*26ad340eSHenning Colliander can->kv_pcie->freq_to_ticks_div)); 1371*26ad340eSHenning Colliander 1372*26ad340eSHenning Colliander cf->data[6] = bec.txerr; 1373*26ad340eSHenning Colliander cf->data[7] = bec.rxerr; 1374*26ad340eSHenning Colliander 1375*26ad340eSHenning Colliander netif_rx(skb); 1376*26ad340eSHenning Colliander } 1377*26ad340eSHenning Colliander can->bec.txerr = bec.txerr; 1378*26ad340eSHenning Colliander can->bec.rxerr = bec.rxerr; 1379*26ad340eSHenning Colliander /* Check if we need to poll the error counters */ 1380*26ad340eSHenning Colliander if (bec.txerr || bec.rxerr) 1381*26ad340eSHenning Colliander mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); 1382*26ad340eSHenning Colliander 1383*26ad340eSHenning Colliander return 0; 1384*26ad340eSHenning Colliander } 1385*26ad340eSHenning Colliander 1386*26ad340eSHenning Colliander 
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, 1387*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1388*26ad340eSHenning Colliander { 1389*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1390*26ad340eSHenning Colliander u8 cmdseq; 1391*26ad340eSHenning Colliander u32 status; 1392*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1393*26ad340eSHenning Colliander 1394*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1395*26ad340eSHenning Colliander return -EIO; 1396*26ad340eSHenning Colliander 1397*26ad340eSHenning Colliander can = pcie->can[ch_id]; 1398*26ad340eSHenning Colliander 1399*26ad340eSHenning Colliander status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 1400*26ad340eSHenning Colliander cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff; 1401*26ad340eSHenning Colliander 1402*26ad340eSHenning Colliander /* Reset done, start abort and flush */ 1403*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && 1404*26ad340eSHenning Colliander p->header[0] & KVASER_PCIEFD_SPACK_RMCD && 1405*26ad340eSHenning Colliander p->header[1] & KVASER_PCIEFD_SPACK_AUTO && 1406*26ad340eSHenning Colliander cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && 1407*26ad340eSHenning Colliander status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 1408*26ad340eSHenning Colliander u32 cmd; 1409*26ad340eSHenning Colliander 1410*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 1411*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1412*26ad340eSHenning Colliander cmd = KVASER_PCIEFD_KCAN_CMD_AT; 1413*26ad340eSHenning Colliander cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 1414*26ad340eSHenning Colliander iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 1415*26ad340eSHenning Colliander 1416*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD, 1417*26ad340eSHenning 
Colliander can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1418*26ad340eSHenning Colliander } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && 1419*26ad340eSHenning Colliander p->header[0] & KVASER_PCIEFD_SPACK_IRM && 1420*26ad340eSHenning Colliander cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && 1421*26ad340eSHenning Colliander status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 1422*26ad340eSHenning Colliander /* Reset detected, send end of flush if no packet are in FIFO */ 1423*26ad340eSHenning Colliander u8 count = ioread32(can->reg_base + 1424*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; 1425*26ad340eSHenning Colliander 1426*26ad340eSHenning Colliander if (!count) 1427*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, 1428*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); 1429*26ad340eSHenning Colliander } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && 1430*26ad340eSHenning Colliander cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) { 1431*26ad340eSHenning Colliander /* Response to status request received */ 1432*26ad340eSHenning Colliander kvaser_pciefd_handle_status_resp(can, p); 1433*26ad340eSHenning Colliander if (can->can.state != CAN_STATE_BUS_OFF && 1434*26ad340eSHenning Colliander can->can.state != CAN_STATE_ERROR_ACTIVE) { 1435*26ad340eSHenning Colliander mod_timer(&can->bec_poll_timer, 1436*26ad340eSHenning Colliander KVASER_PCIEFD_BEC_POLL_FREQ); 1437*26ad340eSHenning Colliander } 1438*26ad340eSHenning Colliander } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && 1439*26ad340eSHenning Colliander !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) { 1440*26ad340eSHenning Colliander /* Reset to bus on detected */ 1441*26ad340eSHenning Colliander if (!completion_done(&can->start_comp)) 1442*26ad340eSHenning Colliander complete(&can->start_comp); 1443*26ad340eSHenning Colliander } 1444*26ad340eSHenning Colliander 1445*26ad340eSHenning Colliander return 0; 
1446*26ad340eSHenning Colliander } 1447*26ad340eSHenning Colliander 1448*26ad340eSHenning Colliander static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie, 1449*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1450*26ad340eSHenning Colliander { 1451*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1452*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1453*26ad340eSHenning Colliander 1454*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1455*26ad340eSHenning Colliander return -EIO; 1456*26ad340eSHenning Colliander 1457*26ad340eSHenning Colliander can = pcie->can[ch_id]; 1458*26ad340eSHenning Colliander 1459*26ad340eSHenning Colliander /* If this is the last flushed packet, send end of flush */ 1460*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { 1461*26ad340eSHenning Colliander u8 count = ioread32(can->reg_base + 1462*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; 1463*26ad340eSHenning Colliander 1464*26ad340eSHenning Colliander if (count == 0) 1465*26ad340eSHenning Colliander iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, 1466*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); 1467*26ad340eSHenning Colliander } else { 1468*26ad340eSHenning Colliander int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; 1469*26ad340eSHenning Colliander int dlc = can_get_echo_skb(can->can.dev, echo_idx); 1470*26ad340eSHenning Colliander struct net_device_stats *stats = &can->can.dev->stats; 1471*26ad340eSHenning Colliander 1472*26ad340eSHenning Colliander stats->tx_bytes += dlc; 1473*26ad340eSHenning Colliander stats->tx_packets++; 1474*26ad340eSHenning Colliander 1475*26ad340eSHenning Colliander if (netif_queue_stopped(can->can.dev)) 1476*26ad340eSHenning Colliander netif_wake_queue(can->can.dev); 1477*26ad340eSHenning Colliander } 1478*26ad340eSHenning Colliander 1479*26ad340eSHenning Colliander return 0; 
1480*26ad340eSHenning Colliander } 1481*26ad340eSHenning Colliander 1482*26ad340eSHenning Colliander static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, 1483*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1484*26ad340eSHenning Colliander { 1485*26ad340eSHenning Colliander struct sk_buff *skb; 1486*26ad340eSHenning Colliander struct net_device_stats *stats = &can->can.dev->stats; 1487*26ad340eSHenning Colliander struct can_frame *cf; 1488*26ad340eSHenning Colliander 1489*26ad340eSHenning Colliander skb = alloc_can_err_skb(can->can.dev, &cf); 1490*26ad340eSHenning Colliander 1491*26ad340eSHenning Colliander stats->tx_errors++; 1492*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { 1493*26ad340eSHenning Colliander if (skb) 1494*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_LOSTARB; 1495*26ad340eSHenning Colliander can->can.can_stats.arbitration_lost++; 1496*26ad340eSHenning Colliander } else if (skb) { 1497*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_ACK; 1498*26ad340eSHenning Colliander } 1499*26ad340eSHenning Colliander 1500*26ad340eSHenning Colliander if (skb) { 1501*26ad340eSHenning Colliander cf->can_id |= CAN_ERR_BUSERROR; 1502*26ad340eSHenning Colliander stats->rx_bytes += cf->can_dlc; 1503*26ad340eSHenning Colliander stats->rx_packets++; 1504*26ad340eSHenning Colliander netif_rx(skb); 1505*26ad340eSHenning Colliander } else { 1506*26ad340eSHenning Colliander stats->rx_dropped++; 1507*26ad340eSHenning Colliander netdev_warn(can->can.dev, "No memory left for err_skb\n"); 1508*26ad340eSHenning Colliander } 1509*26ad340eSHenning Colliander } 1510*26ad340eSHenning Colliander 1511*26ad340eSHenning Colliander static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, 1512*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1513*26ad340eSHenning Colliander { 1514*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1515*26ad340eSHenning Colliander bool 
one_shot_fail = false; 1516*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1517*26ad340eSHenning Colliander 1518*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1519*26ad340eSHenning Colliander return -EIO; 1520*26ad340eSHenning Colliander 1521*26ad340eSHenning Colliander can = pcie->can[ch_id]; 1522*26ad340eSHenning Colliander /* Ignore control packet ACK */ 1523*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_APACKET_CT) 1524*26ad340eSHenning Colliander return 0; 1525*26ad340eSHenning Colliander 1526*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { 1527*26ad340eSHenning Colliander kvaser_pciefd_handle_nack_packet(can, p); 1528*26ad340eSHenning Colliander one_shot_fail = true; 1529*26ad340eSHenning Colliander } 1530*26ad340eSHenning Colliander 1531*26ad340eSHenning Colliander if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { 1532*26ad340eSHenning Colliander netdev_dbg(can->can.dev, "Packet was flushed\n"); 1533*26ad340eSHenning Colliander } else { 1534*26ad340eSHenning Colliander int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; 1535*26ad340eSHenning Colliander int dlc = can_get_echo_skb(can->can.dev, echo_idx); 1536*26ad340eSHenning Colliander u8 count = ioread32(can->reg_base + 1537*26ad340eSHenning Colliander KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; 1538*26ad340eSHenning Colliander 1539*26ad340eSHenning Colliander if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && 1540*26ad340eSHenning Colliander netif_queue_stopped(can->can.dev)) 1541*26ad340eSHenning Colliander netif_wake_queue(can->can.dev); 1542*26ad340eSHenning Colliander 1543*26ad340eSHenning Colliander if (!one_shot_fail) { 1544*26ad340eSHenning Colliander struct net_device_stats *stats = &can->can.dev->stats; 1545*26ad340eSHenning Colliander 1546*26ad340eSHenning Colliander stats->tx_bytes += dlc; 1547*26ad340eSHenning Colliander stats->tx_packets++; 1548*26ad340eSHenning Colliander } 
1549*26ad340eSHenning Colliander } 1550*26ad340eSHenning Colliander 1551*26ad340eSHenning Colliander return 0; 1552*26ad340eSHenning Colliander } 1553*26ad340eSHenning Colliander 1554*26ad340eSHenning Colliander static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, 1555*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p) 1556*26ad340eSHenning Colliander { 1557*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1558*26ad340eSHenning Colliander u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1559*26ad340eSHenning Colliander 1560*26ad340eSHenning Colliander if (ch_id >= pcie->nr_channels) 1561*26ad340eSHenning Colliander return -EIO; 1562*26ad340eSHenning Colliander 1563*26ad340eSHenning Colliander can = pcie->can[ch_id]; 1564*26ad340eSHenning Colliander 1565*26ad340eSHenning Colliander if (!completion_done(&can->flush_comp)) 1566*26ad340eSHenning Colliander complete(&can->flush_comp); 1567*26ad340eSHenning Colliander 1568*26ad340eSHenning Colliander return 0; 1569*26ad340eSHenning Colliander } 1570*26ad340eSHenning Colliander 1571*26ad340eSHenning Colliander static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, 1572*26ad340eSHenning Colliander int dma_buf) 1573*26ad340eSHenning Colliander { 1574*26ad340eSHenning Colliander __le32 *buffer = pcie->dma_data[dma_buf]; 1575*26ad340eSHenning Colliander __le64 timestamp; 1576*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet packet; 1577*26ad340eSHenning Colliander struct kvaser_pciefd_rx_packet *p = &packet; 1578*26ad340eSHenning Colliander u8 type; 1579*26ad340eSHenning Colliander int pos = *start_pos; 1580*26ad340eSHenning Colliander int size; 1581*26ad340eSHenning Colliander int ret = 0; 1582*26ad340eSHenning Colliander 1583*26ad340eSHenning Colliander size = le32_to_cpu(buffer[pos++]); 1584*26ad340eSHenning Colliander if (!size) { 1585*26ad340eSHenning Colliander *start_pos = 0; 1586*26ad340eSHenning Colliander return 0; 
1587*26ad340eSHenning Colliander } 1588*26ad340eSHenning Colliander 1589*26ad340eSHenning Colliander p->header[0] = le32_to_cpu(buffer[pos++]); 1590*26ad340eSHenning Colliander p->header[1] = le32_to_cpu(buffer[pos++]); 1591*26ad340eSHenning Colliander 1592*26ad340eSHenning Colliander /* Read 64-bit timestamp */ 1593*26ad340eSHenning Colliander memcpy(×tamp, &buffer[pos], sizeof(__le64)); 1594*26ad340eSHenning Colliander pos += 2; 1595*26ad340eSHenning Colliander p->timestamp = le64_to_cpu(timestamp); 1596*26ad340eSHenning Colliander 1597*26ad340eSHenning Colliander type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf; 1598*26ad340eSHenning Colliander switch (type) { 1599*26ad340eSHenning Colliander case KVASER_PCIEFD_PACK_TYPE_DATA: 1600*26ad340eSHenning Colliander ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); 1601*26ad340eSHenning Colliander if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { 1602*26ad340eSHenning Colliander u8 data_len; 1603*26ad340eSHenning Colliander 1604*26ad340eSHenning Colliander data_len = can_dlc2len(p->header[1] >> 1605*26ad340eSHenning Colliander KVASER_PCIEFD_RPACKET_DLC_SHIFT); 1606*26ad340eSHenning Colliander pos += DIV_ROUND_UP(data_len, 4); 1607*26ad340eSHenning Colliander } 1608*26ad340eSHenning Colliander break; 1609*26ad340eSHenning Colliander 1610*26ad340eSHenning Colliander case KVASER_PCIEFD_PACK_TYPE_ACK: 1611*26ad340eSHenning Colliander ret = kvaser_pciefd_handle_ack_packet(pcie, p); 1612*26ad340eSHenning Colliander break; 1613*26ad340eSHenning Colliander 1614*26ad340eSHenning Colliander case KVASER_PCIEFD_PACK_TYPE_STATUS: 1615*26ad340eSHenning Colliander ret = kvaser_pciefd_handle_status_packet(pcie, p); 1616*26ad340eSHenning Colliander break; 1617*26ad340eSHenning Colliander 1618*26ad340eSHenning Colliander case KVASER_PCIEFD_PACK_TYPE_ERROR: 1619*26ad340eSHenning Colliander ret = kvaser_pciefd_handle_error_packet(pcie, p); 1620*26ad340eSHenning Colliander break; 1621*26ad340eSHenning Colliander 
	/* Tail of kvaser_pciefd_read_packet(): dispatch on packet type. */
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		/* Known types the driver does not use; log and skip. */
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

/* Drain one receive DMA buffer: parse packets back-to-back until a parse
 * error occurs or the position runs off the end of the buffer.
 *
 * Returns 0 on success or the negative error from
 * kvaser_pciefd_read_packet().
 */
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

/* Service shared-receive-buffer (SRB) interrupts: drain whichever DMA
 * buffer signalled "packets done" (DPD0/DPD1), hand the buffer back to
 * the hardware, and report over-/underflow conditions.
 *
 * Always returns 0.
 */
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	/* DOF = overflow, DUF = underflow (per bit names) — nothing to
	 * recover here beyond logging.
	 */
	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	/* Write the observed bits back to the IRQ register (presumably
	 * write-1-to-clear — confirm against register spec).
	 */
	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}

/* Service per-channel KCAN interrupts for one controller: log Tx/Rx FIFO
 * overflows and configuration errors, and when a "Tx FIFO done" (TFD)
 * interrupt finds the Tx FIFO empty, issue an EFLUSH command.
 *
 * Always returns 0.
 */
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		/* Low byte of TX_NPACKETS = packets currently in Tx FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	/* Ack the handled bits (presumably write-1-to-clear — confirm). */
	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

/* Top-level shared interrupt handler: demultiplex the board IRQ register
 * into the SRB (receive) path and the per-channel KCAN paths.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	/* Shared IRQ line: not ours if none of our bits are set */
	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
Colliander kvaser_pciefd_transmit_irq(pcie->can[i]); 1753*26ad340eSHenning Colliander } 1754*26ad340eSHenning Colliander 1755*26ad340eSHenning Colliander iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1756*26ad340eSHenning Colliander return IRQ_HANDLED; 1757*26ad340eSHenning Colliander } 1758*26ad340eSHenning Colliander 1759*26ad340eSHenning Colliander static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) 1760*26ad340eSHenning Colliander { 1761*26ad340eSHenning Colliander int i; 1762*26ad340eSHenning Colliander struct kvaser_pciefd_can *can; 1763*26ad340eSHenning Colliander 1764*26ad340eSHenning Colliander for (i = 0; i < pcie->nr_channels; i++) { 1765*26ad340eSHenning Colliander can = pcie->can[i]; 1766*26ad340eSHenning Colliander if (can) { 1767*26ad340eSHenning Colliander iowrite32(0, 1768*26ad340eSHenning Colliander can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1769*26ad340eSHenning Colliander kvaser_pciefd_pwm_stop(can); 1770*26ad340eSHenning Colliander free_candev(can->can.dev); 1771*26ad340eSHenning Colliander } 1772*26ad340eSHenning Colliander } 1773*26ad340eSHenning Colliander } 1774*26ad340eSHenning Colliander 1775*26ad340eSHenning Colliander static int kvaser_pciefd_probe(struct pci_dev *pdev, 1776*26ad340eSHenning Colliander const struct pci_device_id *id) 1777*26ad340eSHenning Colliander { 1778*26ad340eSHenning Colliander int err; 1779*26ad340eSHenning Colliander struct kvaser_pciefd *pcie; 1780*26ad340eSHenning Colliander 1781*26ad340eSHenning Colliander pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 1782*26ad340eSHenning Colliander if (!pcie) 1783*26ad340eSHenning Colliander return -ENOMEM; 1784*26ad340eSHenning Colliander 1785*26ad340eSHenning Colliander pci_set_drvdata(pdev, pcie); 1786*26ad340eSHenning Colliander pcie->pci = pdev; 1787*26ad340eSHenning Colliander 1788*26ad340eSHenning Colliander err = pci_enable_device(pdev); 1789*26ad340eSHenning Colliander if (err) 1790*26ad340eSHenning Colliander 
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	/* Map all of BAR 0 (len 0 = whole BAR) */
	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	/* Clear any stale buffer-done bits before enabling SRB interrupts */
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	/* Enable SRB packet-done and over-/underflow interrupts */
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	/* NOTE(review): interrupt generation is enabled above before
	 * request_irq() installs the handler; an interrupt asserted in
	 * this window has no handler yet. Looks racy — confirm whether
	 * request_irq() should precede the IEN writes.
	 */
	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

/* Remove-path counterpart of teardown_can_ctrls(): additionally
 * unregisters each candev and deletes its bus-error-counter poll timer,
 * since the devices were registered during probe.
 */
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			/* Mask all per-channel interrupts */
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

/* PCI remove: tear down all channels, then mask/ack board interrupts
 * before freeing the IRQ and releasing PCI resources.
 */
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
1896*26ad340eSHenning Colliander 1897*26ad340eSHenning Colliander free_irq(pcie->pci->irq, pcie); 1898*26ad340eSHenning Colliander 1899*26ad340eSHenning Colliander pci_clear_master(pdev); 1900*26ad340eSHenning Colliander pci_iounmap(pdev, pcie->reg_base); 1901*26ad340eSHenning Colliander pci_release_regions(pdev); 1902*26ad340eSHenning Colliander pci_disable_device(pdev); 1903*26ad340eSHenning Colliander } 1904*26ad340eSHenning Colliander 1905*26ad340eSHenning Colliander static struct pci_driver kvaser_pciefd = { 1906*26ad340eSHenning Colliander .name = KVASER_PCIEFD_DRV_NAME, 1907*26ad340eSHenning Colliander .id_table = kvaser_pciefd_id_table, 1908*26ad340eSHenning Colliander .probe = kvaser_pciefd_probe, 1909*26ad340eSHenning Colliander .remove = kvaser_pciefd_remove, 1910*26ad340eSHenning Colliander }; 1911*26ad340eSHenning Colliander 1912*26ad340eSHenning Colliander module_pci_driver(kvaser_pciefd) 1913