// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 * - Kvaser linux pciefd driver (version 5.42)
 * - PEAK linux canfd driver
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
#define KVASER_PCIEFD_DMA_COUNT 2U

#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)

#define KVASER_PCIEFD_VENDOR 0x1a07
/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011

/* SmartFusion2 based devices */
#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016

/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)

/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)

/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
/* Shared receive buffer FIFO registers */
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_CMD_REG 0x0
#define KVASER_PCIEFD_SRB_IEN_REG 0x04
#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
#define KVASER_PCIEFD_SRB_STAT_REG 0x10
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
#define KVASER_PCIEFD_SRB_CTRL_REG 0x18

/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)
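
/* Decode sketch for the version fields above (0x04020108 is a made-up
 * example value, not read from real hardware); kvaser_pciefd_setup_board()
 * extracts the fields the same way:
 *
 *	nr_chan = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, 0x04020108); // 4
 *	major   = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, 0x04020108);   // 2
 *	minor   = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, 0x04020108);   // 8
 */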

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)

/* Got DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)

/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* KCAN CTRL packet types */
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5

/* Command sequence number */
#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
/* Command bits */
#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)

/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)

/* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)

/* Current status packet sequence number */
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
	(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
	 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)

/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)

/* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)

/* PWM Control fields */
#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)

/* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9

/* Common KCAN packet definitions, second word */
#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)

/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)

/* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_NACK BIT(11)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_FLU BIT(8)

/* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)

/* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
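
/* Decode sketch for the common packet header fields above (the header value
 * is a made-up example, not real traffic); kvaser_pciefd_read_packet() and
 * the per-type handlers extract the fields the same way:
 *
 *	type  = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, 0x02000007);	// 0x0, data
 *	ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, 0x02000007);	// channel 1
 *	seq   = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, 0x02000007);	// 7
 */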

/* Macros for calculating addresses of registers */
#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
	((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
#define KVASER_PCIEFD_SRB_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
	(KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
	(KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))

struct kvaser_pciefd;
static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);

struct kvaser_pciefd_address_offset {
	u32 serdes;
	u32 pci_ien;
	u32 pci_irq;
	u32 sysid;
	u32 loopback;
	u32 kcan_srb_fifo;
	u32 kcan_srb;
	u32 kcan_ch0;
	u32 kcan_ch1;
};

struct kvaser_pciefd_dev_ops {
	void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
};

struct kvaser_pciefd_irq_mask {
	u32 kcan_rx0;
	u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	u32 all;
};

struct kvaser_pciefd_driver_data {
	const struct kvaser_pciefd_address_offset *address_offset;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	const struct kvaser_pciefd_dev_ops *ops;
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
	.serdes = 0x1000,
	.pci_ien = 0x50,
	.pci_irq = 0x40,
	.sysid = 0x1f020,
	.loopback = 0x1f000,
	.kcan_srb_fifo = 0x1f200,
	.kcan_srb = 0x1f400,
	.kcan_ch0 = 0x10000,
	.kcan_ch1 = 0x11000,
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
	.serdes = 0x280c8,
	.pci_ien = 0x102004,
	.pci_irq = 0x102008,
	.sysid = 0x100000,
	.loopback = 0x103000,
	.kcan_srb_fifo = 0x120000,
	.kcan_srb = 0x121000,
	.kcan_ch0 = 0x140000,
	.kcan_ch1 = 0x142000,
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
	.all = GENMASK(4, 0),
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
	.all = GENMASK(19, 16) | BIT(4),
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
	.address_offset = &kvaser_pciefd_altera_address_offset,
	.irq_mask = &kvaser_pciefd_altera_irq_mask,
	.ops = &kvaser_pciefd_altera_dev_ops,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
	.address_offset = &kvaser_pciefd_sf2_address_offset,
	.irq_mask = &kvaser_pciefd_sf2_irq_mask,
	.ops = &kvaser_pciefd_sf2_dev_ops,
};
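
/* Per-SoC dispatch: each PCI device ID in kvaser_pciefd_id_table below
 * carries one of the two driver_data blocks above, so the register block
 * offsets, the PCI interrupt mask layout and the SerDes DMA-map accessor are
 * all resolved once at probe time:
 *
 *	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
 *	pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
 *
 * (both lines quoted from kvaser_pciefd_probe() and kvaser_pciefd_setup_dma()
 * further down)
 */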

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	const struct kvaser_pciefd_driver_data *driver_data;
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};

static struct pci_device_id kvaser_pciefd_id_table[] = {
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		0,
	},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
	iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
		  FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
		  can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}
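
/* Command/status sequence matching (derived from the code above and from
 * kvaser_pciefd_handle_status_packet() below): every command written to
 * KCAN_CMD_REG carries an incrementing 8-bit sequence number. The hardware
 * reports the sequence number of the command it last acted on both in the
 * STAT register (KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK) and in the resulting
 * status packet (KVASER_PCIEFD_PACKET_SEQ_MASK), which is how a status packet
 * is matched to the KVASER_PCIEFD_KCAN_CMD_SRQ request that triggered it.
 */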

static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}

static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
						    struct sk_buff *skb, u64 timestamp)
{
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}
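
/* Worked example for the conversion above (80 MHz is an assumed clock; the
 * real value is read from KVASER_PCIEFD_SYSID_CANFREQ_REG at probe):
 * kvaser_pciefd_setup_board() sets freq_to_ticks_div = freq / 1000000, so
 * with freq = 80000000 each tick is 1000 / 80 = 12.5 ns, and a raw hardware
 * timestamp of 1600 ticks becomes 20000 ns.
 */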

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
	else
		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		kvaser_pciefd_abort_flush_reset(can);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);
	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
	/* Set duty cycle to zero */
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);
	/* Set frequency to 500 kHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
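
/* Worked example for kvaser_pciefd_pwm_start() above (bus_freq = 125 MHz is
 * an assumed value; the real one is read from KVASER_PCIEFD_SYSID_BUSFREQ_REG):
 *
 *	top     = 125000000 / (2 * 500000) - 1            = 124
 *	trigger = (100 * 124 - 95 * (124 + 1) + 50) / 100 = 5
 *
 * i.e. the counter wraps at 124 for a 500 kHz period, and a trigger level of
 * 5 out of 125 corresponds to the 95% duty cycle the comment in the function
 * asks for.
 */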

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);

	return ret;
}

static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
					   can_fd_len2dlc(cf->len));
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	} else {
		p->header[1] |=
			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
	}

	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}
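
/* Layout of the Tx packet built above, reconstructed from the
 * KVASER_PCIEFD_{R,T}PACKET_* definitions (word order as written to the FIFO
 * in kvaser_pciefd_start_xmit() below):
 *
 *	header[0]: IDE(30) RTR(29) ID(28:0)
 *	header[1]: AREQ(31) SMS(16) FDF(15) BRS(14) ESI(13) DLC(11:8) SEQ(7:0)
 *	data[]:    0-64 bytes of payload, written as 32-bit words
 */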

static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nr_words;
	u8 count;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nr_words) {
		u32 data_last = ((u32 *)packet.data)[nr_words - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq_flags);

	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}
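
/* Worked example for the BTRN/BTRD packing above (hypothetical numbers, not
 * vendor-recommended timing): with an 80 MHz CAN clock, 500 kbit/s nominal
 * could use brp = 2 (tq = 25 ns, 80 tq per bit), prop_seg + phase_seg1 = 63,
 * phase_seg2 = 16 and sjw = 16. All fields are written minus one, so the
 * register would hold TSEG2 = 15, TSEG1 = 62, SJW = 15, BRP = 1. The field
 * widths match kvaser_pciefd_bittiming_const: 9 bits of TSEG1 cover
 * tseg1_max = 512, 13 bits of BRP cover brp_max = 8192, and so on.
 */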

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;

	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};

static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_nr_packets_max;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		tx_nr_packets_max =
			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);

		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO |
					      CAN_CTRLMODE_CC_LEN8_DLC;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;
		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}
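
/* Note on the FIFO sizing above (an inference from the code, not a documented
 * hardware requirement): echo_skb_max is capped at tx_nr_packets_max - 1, so
 * one Tx FIFO slot always stays free and the queue-stop condition in
 * kvaser_pciefd_start_xmit() (count >= echo_skb_max) triggers before the
 * hardware FIFO itself can fill up.
 */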

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(word1, serdes_base);
	iowrite32(word2, serdes_base + 0x4);
}

static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
	u32 msb = 0x0;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	msb = addr >> 32;
#endif
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
	iowrite32(lsb, serdes_base);
	iowrite32(msb, serdes_base + 0x4);
}

static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}
		pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	/* Empty Rx FIFO */
	srb_packet_count =
		FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
			  ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
				   KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
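
/* The two SerDes layouts above in numbers (0x112345000 is an illustrative
 * 64-bit dma_addr_t, not a real allocation):
 *
 *	Altera: word1 = 0x12345000 | KVASER_PCIEFD_ALTERA_DMA_64BIT = 0x12345001
 *		word2 = 0x1
 *		(BIT(0) of the low word enables 64-bit address translation)
 *	SF2:	lsb = 0x12345000 & GENMASK(31, 12) = 0x12345000, msb = 0x1
 *		(the low 12 bits are masked off, so the buffer must be 4 KiB
 *		aligned - which the 4 KiB dmam_alloc_coherent() above provides)
 */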

static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 version, srb_status, build;

	version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
	pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
				FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));

	build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;
	/* Turn off all loopback functionality */
	iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));

	return 0;
}

static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
	u8 dlc;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		cf->len = can_fd_dlc2len(dlc);
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
	}

	cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);
		priv->dev->stats.rx_bytes += cf->len;
	}
	priv->dev->stats.rx_packets++;
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

	return netif_rx(skb);
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
		/* Prevent the CAN controller from automatically recovering from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}
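
/* Worked example for kvaser_pciefd_packet_to_state() above (made-up counter
 * values): with txerr = 130, rxerr = 5 and none of BOFF/IRM/EPLR/EWLR set,
 * new_state is CAN_STATE_ERROR_PASSIVE (txerr >= 128); tx_state becomes
 * ERROR_PASSIVE (txerr >= rxerr) while rx_state stays 0 (txerr > rxerr), so
 * can_change_state() attributes the transition to the transmit side.
 */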

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		ndev->stats.tx_errors++;
	else
		ndev->stats.rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);

	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
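
/* Error-report throttling, summarizing the two functions above together with
 * kvaser_pciefd_bec_poll_timer(): after KVASER_PCIEFD_MAX_ERR_REP (256) error
 * packets, error-packet generation (EPEN) is switched off so a noisy bus
 * cannot flood the host. The 200 ms bec_poll_timer then re-enables EPEN,
 * requests a fresh status packet and clears err_rep_cnt.
 */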

static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		kvaser_pciefd_abort_flush_reset(can);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in the FIFO */
		u8 count;

		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
		if (!count)
			iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
					     KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);
	can->can.dev->stats.tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		netif_rx(skb);
	} else {
		can->can.dev->stats.rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}
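
/* Bus-on handshake, summarizing the interplay between kvaser_pciefd_bus_on(),
 * kvaser_pciefd_start_controller_flush() and the status handler above:
 * flush_comp completes when the EFLUSH ack arrives
 * (kvaser_pciefd_handle_eflush_packet() below), and start_comp completes when
 * a status packet reports RMCD with the controller no longer bus off. The two
 * timeouts in kvaser_pciefd_bus_on() wait on exactly these completions.
 */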

static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}
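
/* Layout of one packet in the receive DMA buffer, as parsed by
 * kvaser_pciefd_read_packet() below (all words little-endian; the meaning of
 * the size word is inferred from the end-of-packet sanity check):
 *
 *	word 0:	  packet size in 32-bit words, the size word itself included
 *		  (a size of 0 marks the end of the buffer)
 *	word 1-2: header[0], header[1]
 *	word 3-4: 64-bit hardware timestamp
 *	word 5-:  payload words, for non-RTR data packets
 */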

static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
}

static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}

static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	int i;

	if (!(board_irq & irq_mask->all))
		return IRQ_NONE;

	if (board_irq & irq_mask->kcan_rx0)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & irq_mask->kcan_tx[i])
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	return IRQ_HANDLED;
}
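
/* Buffer traversal, derived from the loop conditions in
 * kvaser_pciefd_read_buffer() above: packets are consumed until
 * kvaser_pciefd_read_packet() sees a zero size word (it then resets
 * *start_pos to 0 and returns 0, ending the loop through the pos > 0 test),
 * until a handler returns an error, or until the position runs past the DMA
 * window.
 */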

static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	void __iomem *irq_en_base;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;
	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
	irq_mask = pcie->driver_data->irq_mask;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);

	/* Enable PCI interrupts */
	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
	iowrite32(irq_mask->all, irq_en_base);
	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, irq_en_base);
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable interrupts */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

	free_irq(pcie->pci->irq, pcie);

	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);