1 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 3 * Parts of this driver are based on the following: 4 * - Kvaser linux pciefd driver (version 5.25) 5 * - PEAK linux canfd driver 6 * - Altera Avalon EPCS flash controller driver 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/device.h> 12 #include <linux/ethtool.h> 13 #include <linux/pci.h> 14 #include <linux/can/dev.h> 15 #include <linux/timer.h> 16 #include <linux/netdevice.h> 17 #include <linux/crc32.h> 18 #include <linux/iopoll.h> 19 20 MODULE_LICENSE("Dual BSD/GPL"); 21 MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); 22 MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); 23 24 #define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" 25 26 #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) 27 #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) 28 #define KVASER_PCIEFD_MAX_ERR_REP 256 29 #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 30 #define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 31 #define KVASER_PCIEFD_DMA_COUNT 2 32 33 #define KVASER_PCIEFD_DMA_SIZE (4 * 1024) 34 #define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) 35 36 #define KVASER_PCIEFD_VENDOR 0x1a07 37 #define KVASER_PCIEFD_4HS_ID 0x0d 38 #define KVASER_PCIEFD_2HS_ID 0x0e 39 #define KVASER_PCIEFD_HS_ID 0x0f 40 #define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 41 #define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 42 43 /* PCIe IRQ registers */ 44 #define KVASER_PCIEFD_IRQ_REG 0x40 45 #define KVASER_PCIEFD_IEN_REG 0x50 46 /* DMA map */ 47 #define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 48 /* Kvaser KCAN CAN controller registers */ 49 #define KVASER_PCIEFD_KCAN0_BASE 0x10000 50 #define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 51 #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 52 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 53 #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 54 #define KVASER_PCIEFD_KCAN_CMD_REG 0x400 55 #define KVASER_PCIEFD_KCAN_IEN_REG 0x408 56 #define 
KVASER_PCIEFD_KCAN_IRQ_REG 0x410 57 #define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414 58 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 59 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c 60 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 61 #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 62 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 63 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 64 /* Loopback control register */ 65 #define KVASER_PCIEFD_LOOP_REG 0x1f000 66 /* System identification and information registers */ 67 #define KVASER_PCIEFD_SYSID_BASE 0x1f020 68 #define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) 69 #define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) 70 #define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) 71 #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) 72 /* Shared receive buffer registers */ 73 #define KVASER_PCIEFD_SRB_BASE 0x1f200 74 #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) 75 #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) 76 #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) 77 #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) 78 #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) 79 /* EPCS flash controller registers */ 80 #define KVASER_PCIEFD_SPI_BASE 0x1fc00 81 #define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE 82 #define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) 83 #define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) 84 #define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) 85 #define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) 86 87 #define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f 88 #define KVASER_PCIEFD_IRQ_SRB BIT(4) 89 90 #define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 91 #define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 92 #define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1 93 94 /* Reset DMA buffer 0, 1 and FIFO offset */ 95 #define 
KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) 96 #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) 97 #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) 98 99 /* DMA packet done, buffer 0 and 1 */ 100 #define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) 101 #define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) 102 /* DMA overflow, buffer 0 and 1 */ 103 #define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) 104 #define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) 105 /* DMA underflow, buffer 0 and 1 */ 106 #define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) 107 #define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) 108 109 /* DMA idle */ 110 #define KVASER_PCIEFD_SRB_STAT_DI BIT(15) 111 /* DMA support */ 112 #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) 113 114 /* DMA Enable */ 115 #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) 116 117 /* EPCS flash controller definitions */ 118 #define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) 119 #define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) 120 #define KVASER_PCIEFD_CFG_MAX_PARAMS 256 121 #define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d 122 #define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 123 #define KVASER_PCIEFD_CFG_SYS_VER 1 124 #define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 125 #define KVASER_PCIEFD_SPI_TMT BIT(5) 126 #define KVASER_PCIEFD_SPI_TRDY BIT(6) 127 #define KVASER_PCIEFD_SPI_RRDY BIT(7) 128 #define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14 129 /* Commands for controlling the onboard flash */ 130 #define KVASER_PCIEFD_FLASH_RES_CMD 0xab 131 #define KVASER_PCIEFD_FLASH_READ_CMD 0x3 132 #define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5 133 134 /* Kvaser KCAN definitions */ 135 #define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) 136 #define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) 137 138 #define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16 139 /* Request status packet */ 140 #define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) 141 /* Abort, flush and reset */ 142 #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) 143 144 /* Tx FIFO unaligned read */ 145 #define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) 146 /* Tx FIFO unaligned end */ 147 #define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) 148 
/* Bus parameter protection error */ 149 #define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) 150 /* FDF bit when controller is in classic mode */ 151 #define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) 152 /* Rx FIFO overflow */ 153 #define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) 154 /* Abort done */ 155 #define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) 156 /* Tx buffer flush done */ 157 #define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) 158 /* Tx FIFO overflow */ 159 #define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) 160 /* Tx FIFO empty */ 161 #define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) 162 /* Transmitter unaligned */ 163 #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) 164 165 #define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 166 167 #define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 168 /* Abort request */ 169 #define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) 170 /* Idle state. Controller in reset mode and no abort or flush pending */ 171 #define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) 172 /* Bus off */ 173 #define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) 174 /* Reset mode request */ 175 #define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) 176 /* Controller in reset mode */ 177 #define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) 178 /* Controller got one-shot capability */ 179 #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) 180 /* Controller got CAN FD capability */ 181 #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) 182 #define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ 183 KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ 184 KVASER_PCIEFD_KCAN_STAT_IRM) 185 186 /* Reset mode */ 187 #define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) 188 /* Listen only mode */ 189 #define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) 190 /* Error packet enable */ 191 #define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) 192 /* CAN FD non-ISO */ 193 #define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) 194 /* Acknowledgment packet type */ 195 #define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) 196 /* Active error flag enable. 
Clear to force error passive */ 197 #define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) 198 /* Classic CAN mode */ 199 #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) 200 201 #define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 202 #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 203 #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 204 205 #define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 206 207 /* Kvaser KCAN packet types */ 208 #define KVASER_PCIEFD_PACK_TYPE_DATA 0 209 #define KVASER_PCIEFD_PACK_TYPE_ACK 1 210 #define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 211 #define KVASER_PCIEFD_PACK_TYPE_ERROR 3 212 #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 213 #define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 214 #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 215 #define KVASER_PCIEFD_PACK_TYPE_STATUS 8 216 #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 217 218 /* Kvaser KCAN packet common definitions */ 219 #define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff 220 #define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 221 #define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28 222 223 /* Kvaser KCAN TDATA and RDATA first word */ 224 #define KVASER_PCIEFD_RPACKET_IDE BIT(30) 225 #define KVASER_PCIEFD_RPACKET_RTR BIT(29) 226 /* Kvaser KCAN TDATA and RDATA second word */ 227 #define KVASER_PCIEFD_RPACKET_ESI BIT(13) 228 #define KVASER_PCIEFD_RPACKET_BRS BIT(14) 229 #define KVASER_PCIEFD_RPACKET_FDF BIT(15) 230 #define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8 231 /* Kvaser KCAN TDATA second word */ 232 #define KVASER_PCIEFD_TPACKET_SMS BIT(16) 233 #define KVASER_PCIEFD_TPACKET_AREQ BIT(31) 234 235 /* Kvaser KCAN APACKET */ 236 #define KVASER_PCIEFD_APACKET_FLU BIT(8) 237 #define KVASER_PCIEFD_APACKET_CT BIT(9) 238 #define KVASER_PCIEFD_APACKET_ABL BIT(10) 239 #define KVASER_PCIEFD_APACKET_NACK BIT(11) 240 241 /* Kvaser KCAN SPACK first word */ 242 #define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8 243 #define KVASER_PCIEFD_SPACK_BOFF BIT(16) 244 #define KVASER_PCIEFD_SPACK_IDET BIT(20) 245 #define KVASER_PCIEFD_SPACK_IRM BIT(21) 246 #define KVASER_PCIEFD_SPACK_RMCD BIT(22) 
247 /* Kvaser KCAN SPACK second word */ 248 #define KVASER_PCIEFD_SPACK_AUTO BIT(21) 249 #define KVASER_PCIEFD_SPACK_EWLR BIT(23) 250 #define KVASER_PCIEFD_SPACK_EPLR BIT(24) 251 252 /* Kvaser KCAN_EPACK second word */ 253 #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) 254 255 struct kvaser_pciefd; 256 257 struct kvaser_pciefd_can { 258 struct can_priv can; 259 struct kvaser_pciefd *kv_pcie; 260 void __iomem *reg_base; 261 struct can_berr_counter bec; 262 u8 cmd_seq; 263 int err_rep_cnt; 264 int echo_idx; 265 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ 266 spinlock_t echo_lock; /* Locks the message echo buffer */ 267 struct timer_list bec_poll_timer; 268 struct completion start_comp, flush_comp; 269 }; 270 271 struct kvaser_pciefd { 272 struct pci_dev *pci; 273 void __iomem *reg_base; 274 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 275 void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 276 u8 nr_channels; 277 u32 bus_freq; 278 u32 freq; 279 u32 freq_to_ticks_div; 280 }; 281 282 struct kvaser_pciefd_rx_packet { 283 u32 header[2]; 284 u64 timestamp; 285 }; 286 287 struct kvaser_pciefd_tx_packet { 288 u32 header[2]; 289 u8 data[64]; 290 }; 291 292 static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { 293 .name = KVASER_PCIEFD_DRV_NAME, 294 .tseg1_min = 1, 295 .tseg1_max = 512, 296 .tseg2_min = 1, 297 .tseg2_max = 32, 298 .sjw_max = 16, 299 .brp_min = 1, 300 .brp_max = 8192, 301 .brp_inc = 1, 302 }; 303 304 struct kvaser_pciefd_cfg_param { 305 __le32 magic; 306 __le32 nr; 307 __le32 len; 308 u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; 309 }; 310 311 struct kvaser_pciefd_cfg_img { 312 __le32 version; 313 __le32 magic; 314 __le32 crc; 315 struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; 316 }; 317 318 static struct pci_device_id kvaser_pciefd_id_table[] = { 319 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, 320 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, 321 { 
PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, 322 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, 323 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, 324 { 0,}, 325 }; 326 MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); 327 328 /* Onboard flash memory functions */ 329 static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) 330 { 331 u32 res; 332 int ret; 333 334 ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG, 335 res, res & msk, 0, 10); 336 337 return ret; 338 } 339 340 static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, 341 u32 tx_len, u8 *rx, u32 rx_len) 342 { 343 int c; 344 345 iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG); 346 iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 347 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 348 349 c = tx_len; 350 while (c--) { 351 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 352 return -EIO; 353 354 iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); 355 356 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 357 return -EIO; 358 359 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 360 } 361 362 c = rx_len; 363 while (c-- > 0) { 364 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) 365 return -EIO; 366 367 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); 368 369 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) 370 return -EIO; 371 372 *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); 373 } 374 375 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) 376 return -EIO; 377 378 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); 379 380 if (c != -1) { 381 dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); 382 return -EIO; 383 } 384 385 return 0; 386 } 387 388 static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, 389 struct 
kvaser_pciefd_cfg_img *img) 390 { 391 int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; 392 int res, crc; 393 u8 *crc_buff; 394 395 u8 cmd[] = { 396 KVASER_PCIEFD_FLASH_READ_CMD, 397 (u8)((offset >> 16) & 0xff), 398 (u8)((offset >> 8) & 0xff), 399 (u8)(offset & 0xff) 400 }; 401 402 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img, 403 KVASER_PCIEFD_CFG_IMG_SZ); 404 if (res) 405 return res; 406 407 crc_buff = (u8 *)img->params; 408 409 if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) { 410 dev_err(&pcie->pci->dev, 411 "Config flash corrupted, version number is wrong\n"); 412 return -ENODEV; 413 } 414 415 if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { 416 dev_err(&pcie->pci->dev, 417 "Config flash corrupted, magic number is wrong\n"); 418 return -ENODEV; 419 } 420 421 crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); 422 if (le32_to_cpu(img->crc) != crc) { 423 dev_err(&pcie->pci->dev, 424 "Stored CRC does not match flash image contents\n"); 425 return -EIO; 426 } 427 428 return 0; 429 } 430 431 static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, 432 struct kvaser_pciefd_cfg_img *img) 433 { 434 struct kvaser_pciefd_cfg_param *param; 435 436 param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; 437 memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); 438 } 439 440 static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) 441 { 442 int res; 443 struct kvaser_pciefd_cfg_img *img; 444 445 /* Read electronic signature */ 446 u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; 447 448 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1); 449 if (res) 450 return -EIO; 451 452 img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); 453 if (!img) 454 return -ENOMEM; 455 456 if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) { 457 dev_err(&pcie->pci->dev, 458 "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n", 459 cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16); 460 461 res = -ENODEV; 462 goto image_free; 
463 } 464 465 cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; 466 res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1); 467 if (res) { 468 goto image_free; 469 } else if (cmd[0] & 1) { 470 res = -EIO; 471 /* No write is ever done, the WIP should never be set */ 472 dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); 473 goto image_free; 474 } 475 476 res = kvaser_pciefd_cfg_read_and_verify(pcie, img); 477 if (res) { 478 res = -EIO; 479 goto image_free; 480 } 481 482 kvaser_pciefd_cfg_read_params(pcie, img); 483 484 image_free: 485 kfree(img); 486 return res; 487 } 488 489 static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) 490 { 491 u32 cmd; 492 493 cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; 494 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 495 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 496 } 497 498 static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) 499 { 500 u32 mode; 501 unsigned long irq; 502 503 spin_lock_irqsave(&can->lock, irq); 504 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 505 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { 506 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 507 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 508 } 509 spin_unlock_irqrestore(&can->lock, irq); 510 } 511 512 static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) 513 { 514 u32 mode; 515 unsigned long irq; 516 517 spin_lock_irqsave(&can->lock, irq); 518 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 519 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; 520 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 521 spin_unlock_irqrestore(&can->lock, irq); 522 } 523 524 static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) 525 { 526 u32 msk; 527 528 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | 529 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | 530 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | 531 
KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | 532 KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; 533 534 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 535 536 return 0; 537 } 538 539 static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) 540 { 541 u32 mode; 542 unsigned long irq; 543 544 spin_lock_irqsave(&can->lock, irq); 545 546 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 547 if (can->can.ctrlmode & CAN_CTRLMODE_FD) { 548 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; 549 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 550 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; 551 else 552 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 553 } else { 554 mode |= KVASER_PCIEFD_KCAN_MODE_CCM; 555 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 556 } 557 558 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 559 mode |= KVASER_PCIEFD_KCAN_MODE_LOM; 560 561 mode |= KVASER_PCIEFD_KCAN_MODE_EEN; 562 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 563 /* Use ACK packet type */ 564 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; 565 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 566 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 567 568 spin_unlock_irqrestore(&can->lock, irq); 569 } 570 571 static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) 572 { 573 u32 status; 574 unsigned long irq; 575 576 spin_lock_irqsave(&can->lock, irq); 577 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 578 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, 579 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 580 581 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 582 if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 583 u32 cmd; 584 585 /* If controller is already idle, run abort, flush and reset */ 586 cmd = KVASER_PCIEFD_KCAN_CMD_AT; 587 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; 588 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 589 } else if (!(status & 
KVASER_PCIEFD_KCAN_STAT_RMR)) { 590 u32 mode; 591 592 /* Put controller in reset mode */ 593 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 594 mode |= KVASER_PCIEFD_KCAN_MODE_RM; 595 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 596 } 597 598 spin_unlock_irqrestore(&can->lock, irq); 599 } 600 601 static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) 602 { 603 u32 mode; 604 unsigned long irq; 605 606 del_timer(&can->bec_poll_timer); 607 608 if (!completion_done(&can->flush_comp)) 609 kvaser_pciefd_start_controller_flush(can); 610 611 if (!wait_for_completion_timeout(&can->flush_comp, 612 KVASER_PCIEFD_WAIT_TIMEOUT)) { 613 netdev_err(can->can.dev, "Timeout during bus on flush\n"); 614 return -ETIMEDOUT; 615 } 616 617 spin_lock_irqsave(&can->lock, irq); 618 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 619 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 620 621 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, 622 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 623 624 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 625 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 626 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 627 spin_unlock_irqrestore(&can->lock, irq); 628 629 if (!wait_for_completion_timeout(&can->start_comp, 630 KVASER_PCIEFD_WAIT_TIMEOUT)) { 631 netdev_err(can->can.dev, "Timeout during bus on reset\n"); 632 return -ETIMEDOUT; 633 } 634 /* Reset interrupt handling */ 635 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 636 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 637 638 kvaser_pciefd_set_tx_irq(can); 639 kvaser_pciefd_setup_controller(can); 640 641 can->can.state = CAN_STATE_ERROR_ACTIVE; 642 netif_wake_queue(can->can.dev); 643 can->bec.txerr = 0; 644 can->bec.rxerr = 0; 645 can->err_rep_cnt = 0; 646 647 return 0; 648 } 649 650 static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) 651 { 652 u8 top; 653 u32 pwm_ctrl; 654 
unsigned long irq; 655 656 spin_lock_irqsave(&can->lock, irq); 657 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 658 top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; 659 660 /* Set duty cycle to zero */ 661 pwm_ctrl |= top; 662 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 663 spin_unlock_irqrestore(&can->lock, irq); 664 } 665 666 static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) 667 { 668 int top, trigger; 669 u32 pwm_ctrl; 670 unsigned long irq; 671 672 kvaser_pciefd_pwm_stop(can); 673 spin_lock_irqsave(&can->lock, irq); 674 675 /* Set frequency to 500 KHz*/ 676 top = can->kv_pcie->bus_freq / (2 * 500000) - 1; 677 678 pwm_ctrl = top & 0xff; 679 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 680 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 681 682 /* Set duty cycle to 95 */ 683 trigger = (100 * top - 95 * (top + 1) + 50) / 100; 684 pwm_ctrl = trigger & 0xff; 685 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; 686 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 687 spin_unlock_irqrestore(&can->lock, irq); 688 } 689 690 static int kvaser_pciefd_open(struct net_device *netdev) 691 { 692 int err; 693 struct kvaser_pciefd_can *can = netdev_priv(netdev); 694 695 err = open_candev(netdev); 696 if (err) 697 return err; 698 699 err = kvaser_pciefd_bus_on(can); 700 if (err) { 701 close_candev(netdev); 702 return err; 703 } 704 705 return 0; 706 } 707 708 static int kvaser_pciefd_stop(struct net_device *netdev) 709 { 710 struct kvaser_pciefd_can *can = netdev_priv(netdev); 711 int ret = 0; 712 713 /* Don't interrupt ongoing flush */ 714 if (!completion_done(&can->flush_comp)) 715 kvaser_pciefd_start_controller_flush(can); 716 717 if (!wait_for_completion_timeout(&can->flush_comp, 718 KVASER_PCIEFD_WAIT_TIMEOUT)) { 719 netdev_err(can->can.dev, "Timeout during stop\n"); 720 ret = -ETIMEDOUT; 721 } else { 722 iowrite32(0, can->reg_base + 
KVASER_PCIEFD_KCAN_IEN_REG); 723 del_timer(&can->bec_poll_timer); 724 } 725 close_candev(netdev); 726 727 return ret; 728 } 729 730 static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, 731 struct kvaser_pciefd_can *can, 732 struct sk_buff *skb) 733 { 734 struct canfd_frame *cf = (struct canfd_frame *)skb->data; 735 int packet_size; 736 int seq = can->echo_idx; 737 738 memset(p, 0, sizeof(*p)); 739 740 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 741 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; 742 743 if (cf->can_id & CAN_RTR_FLAG) 744 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; 745 746 if (cf->can_id & CAN_EFF_FLAG) 747 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; 748 749 p->header[0] |= cf->can_id & CAN_EFF_MASK; 750 p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; 751 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; 752 753 if (can_is_canfd_skb(skb)) { 754 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; 755 if (cf->flags & CANFD_BRS) 756 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; 757 if (cf->flags & CANFD_ESI) 758 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; 759 } 760 761 p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; 762 763 packet_size = cf->len; 764 memcpy(p->data, cf->data, packet_size); 765 766 return DIV_ROUND_UP(packet_size, 4); 767 } 768 769 static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, 770 struct net_device *netdev) 771 { 772 struct kvaser_pciefd_can *can = netdev_priv(netdev); 773 unsigned long irq_flags; 774 struct kvaser_pciefd_tx_packet packet; 775 int nwords; 776 u8 count; 777 778 if (can_dropped_invalid_skb(netdev, skb)) 779 return NETDEV_TX_OK; 780 781 nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); 782 783 spin_lock_irqsave(&can->echo_lock, irq_flags); 784 785 /* Prepare and save echo skb in internal slot */ 786 can_put_echo_skb(skb, netdev, can->echo_idx, 0); 787 788 /* Move echo index to the next slot */ 789 can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; 
790 791 /* Write header to fifo */ 792 iowrite32(packet.header[0], 793 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 794 iowrite32(packet.header[1], 795 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 796 797 if (nwords) { 798 u32 data_last = ((u32 *)packet.data)[nwords - 1]; 799 800 /* Write data to fifo, except last word */ 801 iowrite32_rep(can->reg_base + 802 KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, 803 nwords - 1); 804 /* Write last word to end of fifo */ 805 __raw_writel(data_last, can->reg_base + 806 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 807 } else { 808 /* Complete write to fifo */ 809 __raw_writel(0, can->reg_base + 810 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 811 } 812 813 count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); 814 /* No room for a new message, stop the queue until at least one 815 * successful transmit 816 */ 817 if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || 818 can->can.echo_skb[can->echo_idx]) 819 netif_stop_queue(netdev); 820 821 spin_unlock_irqrestore(&can->echo_lock, irq_flags); 822 823 return NETDEV_TX_OK; 824 } 825 826 static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) 827 { 828 u32 mode, test, btrn; 829 unsigned long irq_flags; 830 int ret; 831 struct can_bittiming *bt; 832 833 if (data) 834 bt = &can->can.data_bittiming; 835 else 836 bt = &can->can.bittiming; 837 838 btrn = ((bt->phase_seg2 - 1) & 0x1f) << 839 KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | 840 (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << 841 KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | 842 ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT | 843 ((bt->brp - 1) & 0x1fff); 844 845 spin_lock_irqsave(&can->lock, irq_flags); 846 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 847 848 /* Put the circuit in reset mode */ 849 iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, 850 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 851 852 /* Can only set bittiming if in reset mode */ 853 ret = readl_poll_timeout(can->reg_base + 
KVASER_PCIEFD_KCAN_MODE_REG, 854 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 855 0, 10); 856 857 if (ret) { 858 spin_unlock_irqrestore(&can->lock, irq_flags); 859 return -EBUSY; 860 } 861 862 if (data) 863 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); 864 else 865 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); 866 867 /* Restore previous reset mode status */ 868 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 869 870 spin_unlock_irqrestore(&can->lock, irq_flags); 871 return 0; 872 } 873 874 static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) 875 { 876 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); 877 } 878 879 static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) 880 { 881 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); 882 } 883 884 static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) 885 { 886 struct kvaser_pciefd_can *can = netdev_priv(ndev); 887 int ret = 0; 888 889 switch (mode) { 890 case CAN_MODE_START: 891 if (!can->can.restart_ms) 892 ret = kvaser_pciefd_bus_on(can); 893 break; 894 default: 895 return -EOPNOTSUPP; 896 } 897 898 return ret; 899 } 900 901 static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, 902 struct can_berr_counter *bec) 903 { 904 struct kvaser_pciefd_can *can = netdev_priv(ndev); 905 906 bec->rxerr = can->bec.rxerr; 907 bec->txerr = can->bec.txerr; 908 return 0; 909 } 910 911 static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) 912 { 913 struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); 914 915 kvaser_pciefd_enable_err_gen(can); 916 kvaser_pciefd_request_status(can); 917 can->err_rep_cnt = 0; 918 } 919 920 static const struct net_device_ops kvaser_pciefd_netdev_ops = { 921 .ndo_open = kvaser_pciefd_open, 922 .ndo_stop = kvaser_pciefd_stop, 923 .ndo_eth_ioctl = can_eth_ioctl_hwts, 924 .ndo_start_xmit = kvaser_pciefd_start_xmit, 925 .ndo_change_mtu = 
	can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};

/* Allocate and initialize one candev per channel: map the per-channel KCAN
 * register window, sanity-check hardware capabilities (Tx FIFO depth, CAN FD
 * support) and hook up the can_priv callbacks. Devices are only allocated
 * here; registration happens later in kvaser_pciefd_reg_candev().
 * Returns 0 on success or a negative errno (already-created candevs for
 * earlier channels are NOT freed here; the caller unwinds via
 * kvaser_pciefd_teardown_can_ctrls()).
 */
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		/* Each channel's KCAN block lives at a fixed stride from
		 * KCAN0.
		 */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		/* The driver assumes room for at least
		 * KVASER_PCIEFD_CAN_TX_MAX_COUNT in-flight Tx packets; refuse
		 * hardware that reports a smaller FIFO.
		 */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		     0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		/* Nominal and data phase share the same bittiming limits */
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		/* CAP bit set means the controller supports single-shot Tx */
		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Ack any stale IRQs, then enable abort-done and
		 * Tx-FIFO-done interrupts for this channel.
		 */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

/* Register all previously allocated candevs with the networking core.
 * On failure, devices registered so far are unregistered again; the candev
 * memory itself is freed by the caller's teardown path.
 */
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

/* Program one DMA buffer address into the board's DMA map at @offset.
 * On 64-bit DMA configs the low word carries a flag bit telling the
 * hardware a second (high) address word follows.
 */
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}

/* Allocate the two coherent Rx DMA buffers, map them into the board's DMA
 * map, reset the shared Rx FIFO and buffers, and enable DMA. Buffers are
 * managed (dmam_*) so no explicit free is needed on the error paths.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	/* DMA engine must report idle before it is switched on */
	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

/* Read board configuration and identification registers: verify the channel
 * count against the flash config, require DMA-capable hardware, and latch
 * the CAN core clock frequencies used for timestamp conversion.
 * Returns 0 on success or a negative errno.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	/* Clamp to what pcie->can[] can hold; only reachable if config and
	 * sysid agree on a value above the driver maximum.
	 */
	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Divider for converting hardware timestamp ticks to microseconds;
	 * never zero, even for sub-MHz clocks.
	 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}

/* Deliver one received data packet (classic CAN or CAN FD) to the stack.
 * @data points at the payload words following the packet header in the DMA
 * buffer. Returns netif_rx() result, or a negative errno for a bad channel
 * id or skb allocation failure.
 */
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		/* CAN FD frame: also propagate BRS/ESI flags */
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		/* Remote frames carry no payload */
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	/* Convert hardware timestamp ticks to a ns hardware timestamp */
	shhwtstamps = skb_hwtstamps(skb);

	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	return netif_rx(skb);
}

/* Apply a CAN state transition to the device. On entry to bus-off the Tx
 * queue is stopped and, unless automatic restart is configured
 * (restart_ms != 0), the controller is flushed so it cannot auto-recover
 * behind the stack's back, and can_bus_off() is reported.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

/* Derive the CAN state from a status/error packet: hardware flag bits take
 * priority, with the error counters as fallback thresholds (255 bus-off,
 * 128 error-passive, 96 error-warning). tx_state/rx_state indicate which
 * side caused the transition (0 == CAN_STATE_ERROR_ACTIVE).
 */
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

/* Process an error packet: update state, error counters and statistics, and
 * emit an error frame carrying CAN_ERR_BUSERROR plus the counters.
 * Returns 0, or -ENOMEM if no error skb could be allocated (counters and
 * stats are still updated in that case).
 */
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		/* cf may be NULL here; kvaser_pciefd_change_state and
		 * can_change_state tolerate that.
		 */
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		/* Coming back from bus-off with restart configured counts
		 * as a restart.
		 */
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		stats->tx_errors++;
	else
		stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);
	return 0;
}

/* Dispatch an error packet to the right channel. Once the per-channel error
 * report budget is exhausted, error generation is disabled and the counters
 * are sampled by the bec_poll_timer instead.
 */
static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}

/* Handle the response to an explicit status request: update the cached
 * error counters and, on a state change, emit an error frame describing it.
 * Keeps the bec poll timer running while either counter is non-zero.
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

/* Central status-packet dispatcher. Distinguishes, by header flag bits and
 * command sequence number, between: reset-done (issue abort+flush and
 * re-enable the TFD interrupt), reset-detected (finish the flush if the Tx
 * FIFO is empty), a response to an explicit status request, and a
 * reset-to-bus-on event (complete start_comp for the waiting opener).
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

/* Handle an extended ack (EACK): either the last packet of a flush (finish
 * the flush when the Tx FIFO is empty) or a completed Tx whose echo skb is
 * released and counted.
 */
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}

/* Report a NACKed transmission as an error frame: arbitration lost or
 * missing ack, always tagged as a bus error. Statistics are updated even
 * when no skb could be allocated.
 */
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);

	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

/* Handle a Tx ack packet: ignore control-packet acks, report NACKs (which
 * also mark a one-shot failure so the frame is not counted as sent), release
 * the echo skb and wake the queue when FIFO space is available.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev,
echo_idx, NULL); 1550 u8 count = ioread32(can->reg_base + 1551 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; 1552 1553 if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && 1554 netif_queue_stopped(can->can.dev)) 1555 netif_wake_queue(can->can.dev); 1556 1557 if (!one_shot_fail) { 1558 struct net_device_stats *stats = &can->can.dev->stats; 1559 1560 stats->tx_bytes += dlc; 1561 stats->tx_packets++; 1562 } 1563 } 1564 1565 return 0; 1566 } 1567 1568 static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, 1569 struct kvaser_pciefd_rx_packet *p) 1570 { 1571 struct kvaser_pciefd_can *can; 1572 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; 1573 1574 if (ch_id >= pcie->nr_channels) 1575 return -EIO; 1576 1577 can = pcie->can[ch_id]; 1578 1579 if (!completion_done(&can->flush_comp)) 1580 complete(&can->flush_comp); 1581 1582 return 0; 1583 } 1584 1585 static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, 1586 int dma_buf) 1587 { 1588 __le32 *buffer = pcie->dma_data[dma_buf]; 1589 __le64 timestamp; 1590 struct kvaser_pciefd_rx_packet packet; 1591 struct kvaser_pciefd_rx_packet *p = &packet; 1592 u8 type; 1593 int pos = *start_pos; 1594 int size; 1595 int ret = 0; 1596 1597 size = le32_to_cpu(buffer[pos++]); 1598 if (!size) { 1599 *start_pos = 0; 1600 return 0; 1601 } 1602 1603 p->header[0] = le32_to_cpu(buffer[pos++]); 1604 p->header[1] = le32_to_cpu(buffer[pos++]); 1605 1606 /* Read 64-bit timestamp */ 1607 memcpy(×tamp, &buffer[pos], sizeof(__le64)); 1608 pos += 2; 1609 p->timestamp = le64_to_cpu(timestamp); 1610 1611 type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf; 1612 switch (type) { 1613 case KVASER_PCIEFD_PACK_TYPE_DATA: 1614 ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); 1615 if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { 1616 u8 data_len; 1617 1618 data_len = can_fd_dlc2len(p->header[1] >> 1619 KVASER_PCIEFD_RPACKET_DLC_SHIFT); 1620 pos += DIV_ROUND_UP(data_len, 4); 1621 } 
1622 break; 1623 1624 case KVASER_PCIEFD_PACK_TYPE_ACK: 1625 ret = kvaser_pciefd_handle_ack_packet(pcie, p); 1626 break; 1627 1628 case KVASER_PCIEFD_PACK_TYPE_STATUS: 1629 ret = kvaser_pciefd_handle_status_packet(pcie, p); 1630 break; 1631 1632 case KVASER_PCIEFD_PACK_TYPE_ERROR: 1633 ret = kvaser_pciefd_handle_error_packet(pcie, p); 1634 break; 1635 1636 case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: 1637 ret = kvaser_pciefd_handle_eack_packet(pcie, p); 1638 break; 1639 1640 case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: 1641 ret = kvaser_pciefd_handle_eflush_packet(pcie, p); 1642 break; 1643 1644 case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: 1645 case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: 1646 case KVASER_PCIEFD_PACK_TYPE_TXRQ: 1647 dev_info(&pcie->pci->dev, 1648 "Received unexpected packet type 0x%08X\n", type); 1649 break; 1650 1651 default: 1652 dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); 1653 ret = -EIO; 1654 break; 1655 } 1656 1657 if (ret) 1658 return ret; 1659 1660 /* Position does not point to the end of the package, 1661 * corrupted packet size? 
1662 */ 1663 if ((*start_pos + size) != pos) 1664 return -EIO; 1665 1666 /* Point to the next packet header, if any */ 1667 *start_pos = pos; 1668 1669 return ret; 1670 } 1671 1672 static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) 1673 { 1674 int pos = 0; 1675 int res = 0; 1676 1677 do { 1678 res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); 1679 } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); 1680 1681 return res; 1682 } 1683 1684 static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) 1685 { 1686 u32 irq; 1687 1688 irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1689 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { 1690 kvaser_pciefd_read_buffer(pcie, 0); 1691 /* Reset DMA buffer 0 */ 1692 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1693 pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1694 } 1695 1696 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { 1697 kvaser_pciefd_read_buffer(pcie, 1); 1698 /* Reset DMA buffer 1 */ 1699 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1700 pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1701 } 1702 1703 if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || 1704 irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || 1705 irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || 1706 irq & KVASER_PCIEFD_SRB_IRQ_DUF1) 1707 dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); 1708 1709 iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1710 return 0; 1711 } 1712 1713 static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) 1714 { 1715 u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1716 1717 if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) 1718 netdev_err(can->can.dev, "Tx FIFO overflow\n"); 1719 1720 if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { 1721 u8 count = ioread32(can->reg_base + 1722 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; 1723 1724 if (count == 0) 1725 iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, 1726 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); 1727 } 1728 1729 if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) 1730 
netdev_err(can->can.dev, 1731 "Fail to change bittiming, when not in reset mode\n"); 1732 1733 if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) 1734 netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); 1735 1736 if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) 1737 netdev_err(can->can.dev, "Rx FIFO overflow\n"); 1738 1739 iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1740 return 0; 1741 } 1742 1743 static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) 1744 { 1745 struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; 1746 u32 board_irq; 1747 int i; 1748 1749 board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1750 1751 if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) 1752 return IRQ_NONE; 1753 1754 if (board_irq & KVASER_PCIEFD_IRQ_SRB) 1755 kvaser_pciefd_receive_irq(pcie); 1756 1757 for (i = 0; i < pcie->nr_channels; i++) { 1758 if (!pcie->can[i]) { 1759 dev_err(&pcie->pci->dev, 1760 "IRQ mask points to unallocated controller\n"); 1761 break; 1762 } 1763 1764 /* Check that mask matches channel (i) IRQ mask */ 1765 if (board_irq & (1 << i)) 1766 kvaser_pciefd_transmit_irq(pcie->can[i]); 1767 } 1768 1769 iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1770 return IRQ_HANDLED; 1771 } 1772 1773 static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) 1774 { 1775 int i; 1776 struct kvaser_pciefd_can *can; 1777 1778 for (i = 0; i < pcie->nr_channels; i++) { 1779 can = pcie->can[i]; 1780 if (can) { 1781 iowrite32(0, 1782 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1783 kvaser_pciefd_pwm_stop(can); 1784 free_candev(can->can.dev); 1785 } 1786 } 1787 } 1788 1789 static int kvaser_pciefd_probe(struct pci_dev *pdev, 1790 const struct pci_device_id *id) 1791 { 1792 int err; 1793 struct kvaser_pciefd *pcie; 1794 1795 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 1796 if (!pcie) 1797 return -ENOMEM; 1798 1799 pci_set_drvdata(pdev, pcie); 1800 pcie->pci = pdev; 1801 1802 err = pci_enable_device(pdev); 1803 if (err) 
1804 return err; 1805 1806 err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); 1807 if (err) 1808 goto err_disable_pci; 1809 1810 pcie->reg_base = pci_iomap(pdev, 0, 0); 1811 if (!pcie->reg_base) { 1812 err = -ENOMEM; 1813 goto err_release_regions; 1814 } 1815 1816 err = kvaser_pciefd_setup_board(pcie); 1817 if (err) 1818 goto err_pci_iounmap; 1819 1820 err = kvaser_pciefd_setup_dma(pcie); 1821 if (err) 1822 goto err_pci_iounmap; 1823 1824 pci_set_master(pdev); 1825 1826 err = kvaser_pciefd_setup_can_ctrls(pcie); 1827 if (err) 1828 goto err_teardown_can_ctrls; 1829 1830 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, 1831 pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1832 1833 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | 1834 KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | 1835 KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, 1836 pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); 1837 1838 /* Reset IRQ handling, expected to be off before */ 1839 iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, 1840 pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1841 iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, 1842 pcie->reg_base + KVASER_PCIEFD_IEN_REG); 1843 1844 /* Ready the DMA buffers */ 1845 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1846 pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1847 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1848 pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1849 1850 err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, 1851 IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); 1852 if (err) 1853 goto err_teardown_can_ctrls; 1854 1855 err = kvaser_pciefd_reg_candev(pcie); 1856 if (err) 1857 goto err_free_irq; 1858 1859 return 0; 1860 1861 err_free_irq: 1862 free_irq(pcie->pci->irq, pcie); 1863 1864 err_teardown_can_ctrls: 1865 kvaser_pciefd_teardown_can_ctrls(pcie); 1866 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1867 pci_clear_master(pdev); 1868 1869 err_pci_iounmap: 1870 pci_iounmap(pdev, pcie->reg_base); 1871 1872 
err_release_regions: 1873 pci_release_regions(pdev); 1874 1875 err_disable_pci: 1876 pci_disable_device(pdev); 1877 1878 return err; 1879 } 1880 1881 static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) 1882 { 1883 struct kvaser_pciefd_can *can; 1884 int i; 1885 1886 for (i = 0; i < pcie->nr_channels; i++) { 1887 can = pcie->can[i]; 1888 if (can) { 1889 iowrite32(0, 1890 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1891 unregister_candev(can->can.dev); 1892 del_timer(&can->bec_poll_timer); 1893 kvaser_pciefd_pwm_stop(can); 1894 free_candev(can->can.dev); 1895 } 1896 } 1897 } 1898 1899 static void kvaser_pciefd_remove(struct pci_dev *pdev) 1900 { 1901 struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); 1902 1903 kvaser_pciefd_remove_all_ctrls(pcie); 1904 1905 /* Turn off IRQ generation */ 1906 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1907 iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, 1908 pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1909 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); 1910 1911 free_irq(pcie->pci->irq, pcie); 1912 1913 pci_clear_master(pdev); 1914 pci_iounmap(pdev, pcie->reg_base); 1915 pci_release_regions(pdev); 1916 pci_disable_device(pdev); 1917 } 1918 1919 static struct pci_driver kvaser_pciefd = { 1920 .name = KVASER_PCIEFD_DRV_NAME, 1921 .id_table = kvaser_pciefd_id_table, 1922 .probe = kvaser_pciefd_probe, 1923 .remove = kvaser_pciefd_remove, 1924 }; 1925 1926 module_pci_driver(kvaser_pciefd) 1927