// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 * - Kvaser linux pciefd driver (version 5.25)
 * - PEAK linux canfd driver
 * - Altera Avalon EPCS flash controller driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
#define KVASER_PCIEFD_DMA_COUNT 2

#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)

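/* Rough BAR0 layout, as implied by the register offsets above (an informal
 * summary, not taken from a datasheet):
 *   0x00040/0x00050  PCIe interrupt (IRQ/IEN) registers
 *   0x01000          DMA map, one 8-byte slot per receive buffer
 *   0x10000          KCAN controller 0, further channels at +0x1000 each
 *   0x1f000          loopback control
 *   0x1f020          system identification block
 *   0x1f200          shared receive buffer (SRB)
 *   0x1fc00          EPCS flash SPI controller
 */
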
#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4)

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5

/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)

#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16

#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
	KVASER_PCIEFD_KCAN_STAT_IRM)

/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)

#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26

#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16

/* Kvaser KCAN packet types */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0
#define KVASER_PCIEFD_PACK_TYPE_ACK 1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9

/* Kvaser KCAN packet common definitions */
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28

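/* Common packet format, as implied by the masks and shifts above: every
 * packet carries its channel id in bits 27:25 and its type in bits 31:28
 * of the second header word, plus a sequence number in a low byte. Data
 * packets keep the CAN id and the IDE/RTR flags in the first word, and the
 * DLC and the CAN FD flags (FDF/BRS/ESI) in the second.
 */
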
/* Kvaser KCAN TDATA and RDATA first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)

/* Kvaser KCAN APACKET */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11)

/* Kvaser KCAN SPACK first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)

struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};

struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};

static struct pci_device_id kvaser_pciefd_id_table[] = {
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

/* Onboard flash memory functions */
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 res;
	int ret;

	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				 res, res & msk, 0, 10);

	return ret;
}

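/* Polled SPI transfer: every byte clocked out through the TX register must
 * be matched by a read from the RX register, first for the tx_len command
 * bytes (replies discarded), then for rx_len dummy writes whose replies
 * are kept. On success the second loop counter underflows to -1, which is
 * what the final check verifies.
 */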
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}

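/* The configuration image is stored at KVASER_PCIEFD_CFG_IMG_OFFSET, which
 * for the 2 MiB EPCS16 part is its last 64 KiB sector. The layout follows
 * struct kvaser_pciefd_cfg_img: version, magic and an inverted big-endian
 * CRC-32 computed over the parameter table that follows.
 */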
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}

static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;

	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}

static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* Read electronic signature */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, the WIP should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}

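/* Each KCAN command carries a sequence number in bits 23:16. The hardware
 * reports it back through the STAT register SEQNO field and in status
 * packets, which is how kvaser_pciefd_handle_status_packet() matches a
 * status response to the command that triggered it.
 */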
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd;

	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}

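/* Bring the controller on bus: flush the Tx FIFO (unless a flush is
 * already in flight), wait for the flush to complete, leave reset mode and
 * wait for the resulting status packet before enabling the normal set of
 * interrupts. Both waits are bounded by KVASER_PCIEFD_WAIT_TIMEOUT.
 */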
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

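/* Worked example for the setup below, assuming a 125 MHz bus clock (the
 * actual bus_freq is read from the SYSID block at probe time):
 * top = 125000000 / (2 * 500000) - 1 = 124, and the 95% duty cycle gives
 * trigger = (100 * 124 - 95 * (124 + 1) + 50) / 100 = 5.
 */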
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 kHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95% */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	close_candev(netdev);

	return ret;
}

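/* Build a TDATA packet from an outgoing skb: header word 0 holds the CAN
 * id and the IDE/RTR flags, header word 1 holds the DLC, the CAN FD flags
 * (FDF/BRS/ESI), the echo-slot sequence number, the AREQ bit and, in
 * one-shot mode, the SMS bit. Returns the payload size in 32-bit words.
 */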
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

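/* BTRN/BTRD bit layout, as encoded below: brp - 1 in bits 12:0, sjw - 1 at
 * bit 13, tseg1 - 1 (prop_seg + phase_seg1) at bit 17 and tseg2 - 1
 * (phase_seg2) at bit 26. The same encoding is written to BTRN (nominal)
 * or BTRD (data phase) depending on @data.
 */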
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;
	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		     0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"Channel %d is missing expected CAN FD support\n",
				i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

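/* Each receive buffer has an 8-byte slot in the DMA map at
 * KVASER_PCIEFD_DMA_MAP_BASE: the low address word, with bit 0 flagging a
 * 64-bit address, followed by the high address word.
 */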
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}

static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}

static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, data, cf->len);

	shhwtstamps = skb_hwtstamps(skb);

	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	return netif_rx(skb);
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent the CAN controller from automatically recovering
		 * from bus off
		 */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

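/* Map a status packet and its error counters to a CAN state. The counter
 * thresholds (96 warning, 128 error passive, 255 bus off) follow the usual
 * CAN fault confinement rules and are checked alongside the corresponding
 * packet flags (BOFF/IRM, EPLR, EWLR).
 */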
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_rx(skb);
	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}

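/* A status response refreshes the cached error counters and, on a state
 * change, emits an error frame carrying the new state and the counters.
 * Non-zero counters arm bec_poll_timer so they keep being sampled.
 */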
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

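/* Status packets arrive for several reasons, distinguished below: an
 * automatic packet after reset (start abort/flush), an idle/reset
 * indication (finish the flush once the Tx FIFO is empty), a response to
 * an explicit status request, or a reset-to-bus-on notification that
 * completes start_comp.
 */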
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}

static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);

	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		stats->rx_bytes += cf->len;
		stats->rx_packets++;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

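/* Packet layout in the DMA buffer, as consumed below: a 32-bit size word
 * (zero terminates the buffer), two 32-bit header words, a 64-bit
 * timestamp and, for data packets that are not RTR frames, the payload
 * rounded up to whole 32-bit words. All fields are little endian.
 */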
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(p->header[1] >>
						  KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}

static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}

static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;
	struct kvaser_pciefd_can *can;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

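/* Bring-up order in probe: board configuration and DMA first, then the
 * per-channel controllers, and only then the shared IRQ, since the handler
 * expects pcie->can[i] to be populated for every asserted channel bit.
 */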
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);