// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.25)
 *  - PEAK linux canfd driver
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
#define KVASER_PCIEFD_DMA_COUNT 2U

#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA address translation map register base */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430

/* PCI interrupt fields */
#define KVASER_PCIEFD_IRQ_SRB BIT(4)
#define KVASER_PCIEFD_IRQ_ALL_MASK GENMASK(4, 0)
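/* Bit 4 (KVASER_PCIEFD_IRQ_SRB) signals the shared receive buffer;
 * bits 3..0 are the per-channel KCAN interrupts (bit i for channel i),
 * see kvaser_pciefd_irq_handler().
 */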
/* Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)

/* Device has DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)

/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* KCAN CTRL packet types */
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5

/* Command sequence number */
#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
/* Command bits */
#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)

/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)

/* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)

/* Current status packet sequence number */
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller has CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
/* Controller has one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
	(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
	 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)

/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)

/* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)

/* PWM Control fields */
#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)

/* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9

/* Common KCAN packet definitions, second word */
#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)

/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)

/* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_NACK BIT(11)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_FLU BIT(8)

/* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)

/* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
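/* Driver layout, as the code below suggests: one kvaser_pciefd device
 * holds up to four KCAN controllers (one netdev each) plus a shared
 * receive buffer (SRB) that DMAs all Rx/ACK/status traffic into two
 * alternating host buffers.
 */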
struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};

static const struct pci_device_id kvaser_pciefd_id_table[] = {
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
	},
	{
		0,
	},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
	iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
		  FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
		  can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}

static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}
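/* Error-packet generation (MODE.EPEN) doubles as a throttle: after
 * KVASER_PCIEFD_MAX_ERR_REP error packets it is switched off in
 * kvaser_pciefd_handle_error_packet() and switched back on from the
 * bec_poll_timer, so a flood of bus errors cannot saturate the host.
 */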
static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}

static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
						    struct sk_buff *skb, u64 timestamp)
{
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
	else
		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		kvaser_pciefd_abort_flush_reset(can);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}
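/* Bus-on is a two-step handshake: flush_comp is completed when the
 * end-of-flush control packet comes back (kvaser_pciefd_handle_eflush_packet())
 * and start_comp when a status packet reports the controller back on the
 * bus (kvaser_pciefd_handle_status_packet()); both waits are bounded by
 * KVASER_PCIEFD_WAIT_TIMEOUT below.
 */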
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);
	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
	/* Set duty cycle to zero */
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);
	/* Set frequency to 500 kHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95% */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);

	return ret;
}
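/* Tx path: kvaser_pciefd_prepare_tx_packet() encodes an skb as a KCAN
 * data packet whose sequence number is the echo slot index; the matching
 * ACK packet later carries that number back so the echoed skb can be
 * released and the queue woken (see kvaser_pciefd_handle_ack_packet()).
 */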
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
					   can_fd_len2dlc(cf->len));
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	} else {
		p->header[1] |=
			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
	}

	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nr_words;
	u8 count;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nr_words) {
		u32 data_last = ((u32 *)packet.data)[nr_words - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
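/* The KCAN controller only accepts new bus parameters while in reset
 * mode; writing them in any other state raises the BPP ("bus parameter
 * protection") interrupt, so set_bittiming first requests RM and polls
 * until the mode register reflects it.
 */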
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq_flags);

	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;

	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
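/* Rx/Tx hardware timestamping is advertised through the generic CAN
 * helpers above (can_eth_ioctl_hwts/can_ethtool_op_get_ts_info_hwts);
 * the actual stamps come from the 64-bit counter carried in every KCAN
 * packet, scaled to nanoseconds in kvaser_pciefd_set_skb_timestamp().
 */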
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_nr_packets_max;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		tx_nr_packets_max =
			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);

		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO |
					      CAN_CTRLMODE_CC_LEN8_DLC;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;
		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}

static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}
		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	/* Empty Rx FIFO */
	srb_packet_count =
		FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
			  ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}
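/* setup_board() runs before the per-channel setup in probe: it
 * discovers nr_channels and the CAN/bus clock frequencies that the
 * controllers and the timestamp conversion depend on.
 */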
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 version, srb_status, build;

	version = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
				FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;
	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);

	return 0;
}

static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
	u8 dlc;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		cf->len = can_fd_dlc2len(dlc);
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
	}

	cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);
		priv->dev->stats.rx_bytes += cf->len;
	}
	priv->dev->stats.rx_packets++;
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

	return netif_rx(skb);
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
		/* Prevent the CAN controller from automatically recovering from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}
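/* Map a status packet onto a CAN state using the classic error-counter
 * thresholds: >= 96 warning, >= 128 error passive, >= 255 (or an explicit
 * BOFF/IRM flag) bus off; the hardware EWLR/EPLR flags are checked before
 * the raw counters.
 */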
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		ndev->stats.tx_errors++;
	else
		ndev->stats.rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);

	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		kvaser_pciefd_abort_flush_reset(can);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in FIFO */
		u8 count;

		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
		if (!count)
			iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
					     KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}
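/* A NACKed transmit is reported as an error frame: ABL set means the
 * frame lost arbitration, otherwise it went unacknowledged. Either way
 * the caller treats it as a one-shot failure and skips the tx
 * byte/packet accounting.
 */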
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);
	can->can.dev->stats.tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		netif_rx(skb);
	} else {
		can->can.dev->stats.rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}
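/* DMA buffer packet layout, as parsed below: a 32-bit size word (0
 * terminates the buffer and rewinds the read position), two 32-bit
 * header words, a 64-bit timestamp, then for data packets the payload
 * rounded up to whole 32-bit words.
 */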
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
}

static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}
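/* Top-level dispatch: bit 4 of the PCI IRQ register routes to the
 * shared-receive-buffer path above, bits 3..0 to the per-channel KCAN
 * handler. Both paths write the read IRQ value back to its register,
 * which the hardware appears to treat as write-one-to-clear.
 */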
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MASK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	return IRQ_HANDLED;
}

static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Enable PCI interrupts */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MASK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}
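/* remove_all_ctrls() mirrors teardown_can_ctrls() from the probe error
 * path, but additionally unregisters the netdevs and stops the per-channel
 * error-counter poll timers, since the channels were fully registered.
 */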
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable interrupts */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);