// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2022 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 */
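/*
 * Usage note (illustrative only, not part of the original driver sources;
 * it relies solely on standard SocketCAN/iproute2 tooling):
 *
 *   ip link set can0 type can bitrate 500000
 *   ip link set can0 up
 *
 * CAN FD capable cores (xlnx,canfd-1.0 / xlnx,canfd-2.0) additionally
 * accept a data phase bitrate:
 *
 *   ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on
 */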
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate Prescaler */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};

#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BRPR_TDCO_MASK		GENMASK(12, 8) /* TDCO */
#define XCAN_2_BRPR_TDCO_MASK		GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TDCV_MASK		GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE		BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Message Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF		0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can:		CAN private data structure.
 * @tx_lock:		Lock for synchronizing TX interrupt handling
 * @tx_head:		Tx CAN packets ready to send on the queue
 * @tx_tail:		Tx CAN packets successfully sent on the queue
 * @tx_max:		Maximum number of packets the driver can send
 * @napi:		NAPI structure
 * @read_reg:		For reading data from CAN registers
 * @write_reg:		For writing data to CAN registers
 * @dev:		Network device data structure
 * @reg_base:		Ioremapped address to registers
 * @irq_flags:		For request_irq()
 * @bus_clk:		Pointer to struct clk
 * @can_clk:		Pointer to struct clk
 * @devtype:		Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};

/* Transmission Delay Compensation constants for CANFD 1.0 */
static const struct can_tdc_const xcan_tdc_const_canfd = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* Manual mode not supported. */
	.tdco_min = 0,
	.tdco_max = 32,
	.tdcf_min = 0, /* Filter window not supported */
	.tdcf_max = 0,
};

/* Transmission Delay Compensation constants for CANFD 2.0 */
static const struct can_tdc_const xcan_tdc_const_canfd2 = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* Manual mode not supported. */
	.tdco_min = 0,
	.tdco_max = 64,
	.tdcf_min = 0, /* Filter window not supported */
	.tdcf_max = 0,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv:	Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;
		if (can_tdc_is_enabled(&priv->can)) {
			if (priv->devtype.cantype == XAXI_CANFD)
				btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
			else
				btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
		}

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}

/**
 * xcan_chip_start - This is the driver start routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr;
	int err;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts
	 *
	 * We enable the ERROR interrupt even with
	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
	 * dedicated interrupt for a state change to
	 * ERROR_WARNING/ERROR_PASSIVE.
	 */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_msr = XCAN_MSR_LBACK_MASK;
	else
		reg_msr = 0x0;

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This checks the driver state and sets the requested mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @ndev:		Pointer to net_device structure
 * @skb:		sk_buff pointer that contains data to be Txed
 * @frame_offset:	Register offset to write the frame to
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
	else
		can_put_echo_skb(skb, ndev, 0, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
				XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->len = can_cc_dlc2len(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->len > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->len > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}

/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *	       frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check if the received frame is CAN FD or not */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}

	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev:	Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb)
			netif_rx(skb);
	}
}

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into the appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part.
 * It will process packets up to the maximum quota value.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done)
		xcan_update_error_state_after_rxtx(ndev);

	if (work_done < quota) {
		if (napi_complete_done(napi, work_done)) {
			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
			ier |= xcan_rx_int_mask(priv);
			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		}
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max, NULL);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	xcan_update_error_state_after_rxtx(ndev);
}

/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and process it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and process it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and process it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and process it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* Disable interrupts and leave the can in configuration mode */
	ret = set_reset_mode(ndev);
	if (ret < 0)
		netdev_dbg(ndev, "set_reset_mode() Failed\n");

	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev:	Pointer to net_device structure
 * @bec:	Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		pm_runtime_put(priv->dev);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
 * @ndev:	Pointer to net_device structure
 * @tdcv:	Pointer to TDCV value
 *
 * Return: 0 on success
 */
static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	*tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

/**
 * xcan_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;
		priv->can.tdc_const = &xcan_tdc_const_canfd;
	}

	if (devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;
		priv->can.tdc_const = &xcan_tdc_const_canfd2;
	}

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
						CAN_CTRLMODE_TDC_AUTO;
		priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
	}

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_free;

	ndev->irq = ret;

	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
				    "device clock not found\n");
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
				    "bus clock not found\n");
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

	/* After reset, a little-endian read of SR is expected to show only
	 * the CONFIG bit; if it does not, assume the core is wired
	 * big-endian and switch the register accessors.
	 */
	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	pm_runtime_put(&pdev->dev);

	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");