1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* Xilinx CAN device driver 3 * 4 * Copyright (C) 2012 - 2014 Xilinx, Inc. 5 * Copyright (C) 2009 PetaLogix. All rights reserved. 6 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy 7 * 8 * Description: 9 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/errno.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/kernel.h> 18 #include <linux/module.h> 19 #include <linux/netdevice.h> 20 #include <linux/of.h> 21 #include <linux/of_device.h> 22 #include <linux/platform_device.h> 23 #include <linux/skbuff.h> 24 #include <linux/spinlock.h> 25 #include <linux/string.h> 26 #include <linux/types.h> 27 #include <linux/can/dev.h> 28 #include <linux/can/error.h> 29 #include <linux/can/led.h> 30 #include <linux/pm_runtime.h> 31 32 #define DRIVER_NAME "xilinx_can" 33 34 /* CAN registers set */ 35 enum xcan_reg { 36 XCAN_SRR_OFFSET = 0x00, /* Software reset */ 37 XCAN_MSR_OFFSET = 0x04, /* Mode select */ 38 XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */ 39 XCAN_BTR_OFFSET = 0x0C, /* Bit timing */ 40 XCAN_ECR_OFFSET = 0x10, /* Error counter */ 41 XCAN_ESR_OFFSET = 0x14, /* Error status */ 42 XCAN_SR_OFFSET = 0x18, /* Status */ 43 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ 44 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ 45 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ 46 47 /* not on CAN FD cores */ 48 XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */ 49 XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */ 50 XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ 51 52 /* only on CAN FD cores */ 53 XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate 54 * Prescalar 55 */ 56 XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */ 57 XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ 58 XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ 59 XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ 60 XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ 61 XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ 62 XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ 63 }; 64 65 #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) 66 #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) 67 #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) 68 #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) 69 #define XCANFD_FRAME_DW_OFFSET(frame_base, n) (((frame_base) + 0x08) + \ 70 ((n) * XCAN_CANFD_FRAME_SIZE)) 71 72 #define XCAN_CANFD_FRAME_SIZE 0x48 73 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ 74 XCAN_CANFD_FRAME_SIZE * (n)) 75 #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ 76 XCAN_CANFD_FRAME_SIZE * (n)) 77 #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \ 78 XCAN_CANFD_FRAME_SIZE * (n)) 79 80 /* the single TX mailbox used by this driver on CAN FD HW */ 81 #define XCAN_TX_MAILBOX_IDX 0 82 83 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */ 84 #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ 85 #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ 86 #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */ 87 #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */ 88 #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */ 89 #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ 90 #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ 91 #define XCAN_BTR_TS1_MASK 0x0000000F 
/* Time segment 1 */ 92 #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */ 93 #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */ 94 #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */ 95 #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ 96 #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ 97 #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ 98 #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */ 99 #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */ 100 #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */ 101 #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */ 102 #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */ 103 #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */ 104 #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */ 105 #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ 106 #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ 107 #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ 108 #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */ 109 #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ 110 #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ 111 #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ 112 #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */ 113 #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */ 114 #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */ 115 #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */ 116 #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */ 117 #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */ 118 #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */ 119 #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */ 120 #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ 121 #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ 122 #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ 123 #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ 124 #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ 125 #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ 126 #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ 127 #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ 128 #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ 129 #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */ 130 #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */ 131 132 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 133 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ 134 #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ 135 #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */ 136 #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */ 137 #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ 138 #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ 139 #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ 140 #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */ 141 142 /* CAN frame length constants */ 143 #define XCAN_FRAME_MAX_DATA_LEN 8 144 #define XCANFD_DW_BYTES 4 145 #define XCAN_TIMEOUT (1 * HZ) 146 147 /* TX-FIFO-empty interrupt available */ 148 #define XCAN_FLAG_TXFEMP 0x0001 149 /* RX Match Not Finished interrupt available */ 150 #define XCAN_FLAG_RXMNF 0x0002 151 /* Extended acceptance filters with control at 0xE0 */ 152 #define XCAN_FLAG_EXT_FILTERS 0x0004 153 
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES		0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI		0x0010
#define XCAN_FLAG_CANFD_2		0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - CAN driver private data
 * @can: CAN private data structure.
 * @tx_lock: Lock for synchronizing TX interrupt handling
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number of packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 * @devtype: Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv: Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in F_BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in F_BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in F_BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
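
/* Worked example (illustrative only; the real values are derived from the
 * user-requested bitrate by the kernel CAN bittiming framework and the
 * bittiming constants above): assume a Zynq CANPS core (TS2 shift 4,
 * SJW shift 7) with can_clk = 24 MHz and a 500 kbit/s nominal bitrate
 * using 16 time quanta per bit, i.e. brp = 3, prop_seg + phase_seg1 = 11,
 * phase_seg2 = 4, sjw = 3.  The writes above then produce
 *
 *	BRPR = 3 - 1                       = 0x00000002
 *	BTR  = (11 - 1)            TS1 field
 *	     | (4 - 1) << 4        TS2 field
 *	     | (3 - 1) << 7        SJW field
 *	                                   = 0x0000013A
 *
 * which gives 24 MHz / (3 * 16) = 500 kbit/s with a 75% sample point.
 */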

/**
 * xcan_chip_start - Driver start routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr, reg_sr_mask;
	int err;
	unsigned long timeout;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
	      XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
	      XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
	      XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		reg_msr = XCAN_MSR_LBACK_MASK;
		reg_sr_mask = XCAN_SR_LBACK_MASK;
	} else {
		reg_msr = 0x0;
		reg_sr_mask = XCAN_SR_NORMAL_MASK;
	}

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev,
				    "timed out for correct mode\n");
			return -ETIMEDOUT;
		}
	}
	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}
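
/* Illustrative userspace configuration (iproute2; "can0" is only an example
 * interface name, not something this driver mandates):
 *
 *	ip link set can0 type can bitrate 500000 loopback on
 *	ip link set can0 up
 *
 * "loopback on" sets CAN_CTRLMODE_LOOPBACK, which makes xcan_chip_start()
 * above program MSR.LBACK and wait for SR.LBACK instead of SR.NORMAL.
 */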

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev: Pointer to net_device structure
 * @mode: Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding mode setting routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @priv: Driver private data structure
 * @skb: sk_buff pointer that contains data to be Txed
 * @frame_offset: Register offset to write the frame to
 */
static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
		     XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
		     XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset, dwindex) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
635 */ 636 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) 637 { 638 struct xcan_priv *priv = netdev_priv(ndev); 639 unsigned long flags; 640 641 /* Check if the TX buffer is full */ 642 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & 643 XCAN_SR_TXFLL_MASK)) 644 return -ENOSPC; 645 646 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 647 648 spin_lock_irqsave(&priv->tx_lock, flags); 649 650 priv->tx_head++; 651 652 xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET); 653 654 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ 655 if (priv->tx_max > 1) 656 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); 657 658 /* Check if the TX buffer is full */ 659 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 660 netif_stop_queue(ndev); 661 662 spin_unlock_irqrestore(&priv->tx_lock, flags); 663 664 return 0; 665 } 666 667 /** 668 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) 669 * @skb: sk_buff pointer that contains data to be Txed 670 * @ndev: Pointer to net_device structure 671 * 672 * Return: 0 on success, -ENOSPC if there is no space 673 */ 674 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) 675 { 676 struct xcan_priv *priv = netdev_priv(ndev); 677 unsigned long flags; 678 679 if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) & 680 BIT(XCAN_TX_MAILBOX_IDX))) 681 return -ENOSPC; 682 683 can_put_echo_skb(skb, ndev, 0); 684 685 spin_lock_irqsave(&priv->tx_lock, flags); 686 687 priv->tx_head++; 688 689 xcan_write_frame(priv, skb, 690 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); 691 692 /* Mark buffer as ready for transmit */ 693 priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX)); 694 695 netif_stop_queue(ndev); 696 697 spin_unlock_irqrestore(&priv->tx_lock, flags); 698 699 return 0; 700 } 701 702 /** 703 * xcan_start_xmit - Starts the transmission 704 * @skb: sk_buff pointer that contains data to be Txed 705 * @ndev: Pointer to net_device structure 706 * 707 * This function is invoked from upper layers to initiate transmission. 708 * 709 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full 710 */ 711 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) 712 { 713 struct xcan_priv *priv = netdev_priv(ndev); 714 int ret; 715 716 if (can_dropped_invalid_skb(ndev, skb)) 717 return NETDEV_TX_OK; 718 719 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) 720 ret = xcan_start_xmit_mailbox(skb, ndev); 721 else 722 ret = xcan_start_xmit_fifo(skb, ndev); 723 724 if (ret < 0) { 725 netdev_err(ndev, "BUG!, TX full when queue awake!\n"); 726 netif_stop_queue(ndev); 727 return NETDEV_TX_BUSY; 728 } 729 730 return NETDEV_TX_OK; 731 } 732 733 /** 734 * xcan_rx - Is called from CAN isr to complete the received 735 * frame processing 736 * @ndev: Pointer to net_device structure 737 * @frame_base: Register offset to the frame to be read 738 * 739 * This function is invoked from the CAN isr(poll) to process the Rx frames. It 740 * does minimal processing and invokes "netif_receive_skb" to complete further 741 * processing. 742 * Return: 1 on success and 0 on failure. 
743 */ 744 static int xcan_rx(struct net_device *ndev, int frame_base) 745 { 746 struct xcan_priv *priv = netdev_priv(ndev); 747 struct net_device_stats *stats = &ndev->stats; 748 struct can_frame *cf; 749 struct sk_buff *skb; 750 u32 id_xcan, dlc, data[2] = {0, 0}; 751 752 skb = alloc_can_skb(ndev, &cf); 753 if (unlikely(!skb)) { 754 stats->rx_dropped++; 755 return 0; 756 } 757 758 /* Read a frame from Xilinx zynq CANPS */ 759 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); 760 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >> 761 XCAN_DLCR_DLC_SHIFT; 762 763 /* Change Xilinx CAN data length format to socketCAN data format */ 764 cf->can_dlc = get_can_dlc(dlc); 765 766 /* Change Xilinx CAN ID format to socketCAN ID format */ 767 if (id_xcan & XCAN_IDR_IDE_MASK) { 768 /* The received frame is an Extended format frame */ 769 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; 770 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> 771 XCAN_IDR_ID2_SHIFT; 772 cf->can_id |= CAN_EFF_FLAG; 773 if (id_xcan & XCAN_IDR_RTR_MASK) 774 cf->can_id |= CAN_RTR_FLAG; 775 } else { 776 /* The received frame is a standard format frame */ 777 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 778 XCAN_IDR_ID1_SHIFT; 779 if (id_xcan & XCAN_IDR_SRR_MASK) 780 cf->can_id |= CAN_RTR_FLAG; 781 } 782 783 /* DW1/DW2 must always be read to remove message from RXFIFO */ 784 data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base)); 785 data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base)); 786 787 if (!(cf->can_id & CAN_RTR_FLAG)) { 788 /* Change Xilinx CAN data format to socketCAN data format */ 789 if (cf->can_dlc > 0) 790 *(__be32 *)(cf->data) = cpu_to_be32(data[0]); 791 if (cf->can_dlc > 4) 792 *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); 793 } 794 795 stats->rx_bytes += cf->can_dlc; 796 stats->rx_packets++; 797 netif_receive_skb(skb); 798 799 return 1; 800 } 801 802 /** 803 * xcanfd_rx - Is called from CAN isr to complete the received 804 * frame processing 805 * @ndev: Pointer to net_device structure 806 * @frame_base: Register offset to the frame to be read 807 * 808 * This function is invoked from the CAN isr(poll) to process the Rx frames. It 809 * does minimal processing and invokes "netif_receive_skb" to complete further 810 * processing. 811 * Return: 1 on success and 0 on failure. 
812 */ 813 static int xcanfd_rx(struct net_device *ndev, int frame_base) 814 { 815 struct xcan_priv *priv = netdev_priv(ndev); 816 struct net_device_stats *stats = &ndev->stats; 817 struct canfd_frame *cf; 818 struct sk_buff *skb; 819 u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, fsr, readindex; 820 821 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); 822 if (fsr & XCAN_FSR_FL_MASK) { 823 readindex = fsr & XCAN_FSR_RI_MASK; 824 id_xcan = priv->read_reg(priv, 825 XCAN_FRAME_ID_OFFSET(frame_base)); 826 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)); 827 if (dlc & XCAN_DLCR_EDL_MASK) 828 skb = alloc_canfd_skb(ndev, &cf); 829 else 830 skb = alloc_can_skb(ndev, (struct can_frame **)&cf); 831 832 if (unlikely(!skb)) { 833 stats->rx_dropped++; 834 return 0; 835 } 836 837 /* Change Xilinx CANFD data length format to socketCAN data 838 * format 839 */ 840 if (dlc & XCAN_DLCR_EDL_MASK) 841 cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> 842 XCAN_DLCR_DLC_SHIFT); 843 else 844 cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >> 845 XCAN_DLCR_DLC_SHIFT); 846 847 /* Change Xilinx CAN ID format to socketCAN ID format */ 848 if (id_xcan & XCAN_IDR_IDE_MASK) { 849 /* The received frame is an Extended format frame */ 850 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; 851 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> 852 XCAN_IDR_ID2_SHIFT; 853 cf->can_id |= CAN_EFF_FLAG; 854 if (id_xcan & XCAN_IDR_RTR_MASK) 855 cf->can_id |= CAN_RTR_FLAG; 856 } else { 857 /* The received frame is a standard format frame */ 858 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 859 XCAN_IDR_ID1_SHIFT; 860 if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan & 861 XCAN_IDR_SRR_MASK)) 862 cf->can_id |= CAN_RTR_FLAG; 863 } 864 865 /* Check the frame received is FD or not*/ 866 if (dlc & XCAN_DLCR_EDL_MASK) { 867 for (i = 0; i < cf->len; i += 4) { 868 if (priv->devtype.flags & XCAN_FLAG_CANFD_2) 869 data[0] = priv->read_reg(priv, 870 (XCAN_RXMSG_2_FRAME_OFFSET(readindex) + 871 (dwindex * XCANFD_DW_BYTES))); 872 else 873 data[0] = priv->read_reg(priv, 874 (XCAN_RXMSG_FRAME_OFFSET(readindex) + 875 (dwindex * XCANFD_DW_BYTES))); 876 *(__be32 *)(cf->data + i) = 877 cpu_to_be32(data[0]); 878 dwindex++; 879 } 880 } else { 881 for (i = 0; i < cf->len; i += 4) { 882 if (priv->devtype.flags & XCAN_FLAG_CANFD_2) 883 data[0] = priv->read_reg(priv, 884 XCAN_RXMSG_2_FRAME_OFFSET(readindex) + i); 885 else 886 data[0] = priv->read_reg(priv, 887 XCAN_RXMSG_FRAME_OFFSET(readindex) + i); 888 *(__be32 *)(cf->data + i) = 889 cpu_to_be32(data[0]); 890 } 891 } 892 /* Update FSR Register so that next packet will save to 893 * buffer 894 */ 895 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); 896 fsr |= XCAN_FSR_IRI_MASK; 897 priv->write_reg(priv, XCAN_FSR_OFFSET, fsr); 898 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); 899 stats->rx_bytes += cf->len; 900 stats->rx_packets++; 901 netif_receive_skb(skb); 902 903 return 1; 904 } 905 /* If FSR Register is not updated with fill level */ 906 return 0; 907 } 908 909 /** 910 * xcan_current_error_state - Get current error state from HW 911 * @ndev: Pointer to net_device structure 912 * 913 * Checks the current CAN error state from the HW. Note that this 914 * only checks for ERROR_PASSIVE and ERROR_WARNING. 915 * 916 * Return: 917 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE 918 * otherwise. 
919 */ 920 static enum can_state xcan_current_error_state(struct net_device *ndev) 921 { 922 struct xcan_priv *priv = netdev_priv(ndev); 923 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); 924 925 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) 926 return CAN_STATE_ERROR_PASSIVE; 927 else if (status & XCAN_SR_ERRWRN_MASK) 928 return CAN_STATE_ERROR_WARNING; 929 else 930 return CAN_STATE_ERROR_ACTIVE; 931 } 932 933 /** 934 * xcan_set_error_state - Set new CAN error state 935 * @ndev: Pointer to net_device structure 936 * @new_state: The new CAN state to be set 937 * @cf: Error frame to be populated or NULL 938 * 939 * Set new CAN error state for the device, updating statistics and 940 * populating the error frame if given. 941 */ 942 static void xcan_set_error_state(struct net_device *ndev, 943 enum can_state new_state, 944 struct can_frame *cf) 945 { 946 struct xcan_priv *priv = netdev_priv(ndev); 947 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); 948 u32 txerr = ecr & XCAN_ECR_TEC_MASK; 949 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; 950 enum can_state tx_state = txerr >= rxerr ? new_state : 0; 951 enum can_state rx_state = txerr <= rxerr ? new_state : 0; 952 953 /* non-ERROR states are handled elsewhere */ 954 if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE)) 955 return; 956 957 can_change_state(ndev, cf, tx_state, rx_state); 958 959 if (cf) { 960 cf->data[6] = txerr; 961 cf->data[7] = rxerr; 962 } 963 } 964 965 /** 966 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX 967 * @ndev: Pointer to net_device structure 968 * 969 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if 970 * the performed RX/TX has caused it to drop to a lesser state and set 971 * the interface state accordingly. 972 */ 973 static void xcan_update_error_state_after_rxtx(struct net_device *ndev) 974 { 975 struct xcan_priv *priv = netdev_priv(ndev); 976 enum can_state old_state = priv->can.state; 977 enum can_state new_state; 978 979 /* changing error state due to successful frame RX/TX can only 980 * occur from these states 981 */ 982 if (old_state != CAN_STATE_ERROR_WARNING && 983 old_state != CAN_STATE_ERROR_PASSIVE) 984 return; 985 986 new_state = xcan_current_error_state(ndev); 987 988 if (new_state != old_state) { 989 struct sk_buff *skb; 990 struct can_frame *cf; 991 992 skb = alloc_can_err_skb(ndev, &cf); 993 994 xcan_set_error_state(ndev, new_state, skb ? cf : NULL); 995 996 if (skb) { 997 struct net_device_stats *stats = &ndev->stats; 998 999 stats->rx_packets++; 1000 stats->rx_bytes += cf->can_dlc; 1001 netif_rx(skb); 1002 } 1003 } 1004 } 1005 1006 /** 1007 * xcan_err_interrupt - error frame Isr 1008 * @ndev: net_device pointer 1009 * @isr: interrupt status register value 1010 * 1011 * This is the CAN error interrupt and it will 1012 * check the the type of error and forward the error 1013 * frame to upper layers. 
1014 */ 1015 static void xcan_err_interrupt(struct net_device *ndev, u32 isr) 1016 { 1017 struct xcan_priv *priv = netdev_priv(ndev); 1018 struct net_device_stats *stats = &ndev->stats; 1019 struct can_frame *cf; 1020 struct sk_buff *skb; 1021 u32 err_status; 1022 1023 skb = alloc_can_err_skb(ndev, &cf); 1024 1025 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 1026 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 1027 1028 if (isr & XCAN_IXR_BSOFF_MASK) { 1029 priv->can.state = CAN_STATE_BUS_OFF; 1030 priv->can.can_stats.bus_off++; 1031 /* Leave device in Config Mode in bus-off state */ 1032 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); 1033 can_bus_off(ndev); 1034 if (skb) 1035 cf->can_id |= CAN_ERR_BUSOFF; 1036 } else { 1037 enum can_state new_state = xcan_current_error_state(ndev); 1038 1039 if (new_state != priv->can.state) 1040 xcan_set_error_state(ndev, new_state, skb ? cf : NULL); 1041 } 1042 1043 /* Check for Arbitration lost interrupt */ 1044 if (isr & XCAN_IXR_ARBLST_MASK) { 1045 priv->can.can_stats.arbitration_lost++; 1046 if (skb) { 1047 cf->can_id |= CAN_ERR_LOSTARB; 1048 cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; 1049 } 1050 } 1051 1052 /* Check for RX FIFO Overflow interrupt */ 1053 if (isr & XCAN_IXR_RXOFLW_MASK) { 1054 stats->rx_over_errors++; 1055 stats->rx_errors++; 1056 if (skb) { 1057 cf->can_id |= CAN_ERR_CRTL; 1058 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 1059 } 1060 } 1061 1062 /* Check for RX Match Not Finished interrupt */ 1063 if (isr & XCAN_IXR_RXMNF_MASK) { 1064 stats->rx_dropped++; 1065 stats->rx_errors++; 1066 netdev_err(ndev, "RX match not finished, frame discarded\n"); 1067 if (skb) { 1068 cf->can_id |= CAN_ERR_CRTL; 1069 cf->data[1] |= CAN_ERR_CRTL_UNSPEC; 1070 } 1071 } 1072 1073 /* Check for error interrupt */ 1074 if (isr & XCAN_IXR_ERROR_MASK) { 1075 if (skb) 1076 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 1077 1078 /* Check for Ack error interrupt */ 1079 if (err_status & XCAN_ESR_ACKER_MASK) { 1080 stats->tx_errors++; 1081 if (skb) { 1082 cf->can_id |= CAN_ERR_ACK; 1083 cf->data[3] = CAN_ERR_PROT_LOC_ACK; 1084 } 1085 } 1086 1087 /* Check for Bit error interrupt */ 1088 if (err_status & XCAN_ESR_BERR_MASK) { 1089 stats->tx_errors++; 1090 if (skb) { 1091 cf->can_id |= CAN_ERR_PROT; 1092 cf->data[2] = CAN_ERR_PROT_BIT; 1093 } 1094 } 1095 1096 /* Check for Stuff error interrupt */ 1097 if (err_status & XCAN_ESR_STER_MASK) { 1098 stats->rx_errors++; 1099 if (skb) { 1100 cf->can_id |= CAN_ERR_PROT; 1101 cf->data[2] = CAN_ERR_PROT_STUFF; 1102 } 1103 } 1104 1105 /* Check for Form error interrupt */ 1106 if (err_status & XCAN_ESR_FMER_MASK) { 1107 stats->rx_errors++; 1108 if (skb) { 1109 cf->can_id |= CAN_ERR_PROT; 1110 cf->data[2] = CAN_ERR_PROT_FORM; 1111 } 1112 } 1113 1114 /* Check for CRC error interrupt */ 1115 if (err_status & XCAN_ESR_CRCER_MASK) { 1116 stats->rx_errors++; 1117 if (skb) { 1118 cf->can_id |= CAN_ERR_PROT; 1119 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 1120 } 1121 } 1122 priv->can.can_stats.bus_error++; 1123 } 1124 1125 if (skb) { 1126 stats->rx_packets++; 1127 stats->rx_bytes += cf->can_dlc; 1128 netif_rx(skb); 1129 } 1130 1131 netdev_dbg(ndev, "%s: error status register:0x%x\n", 1132 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); 1133 } 1134 1135 /** 1136 * xcan_state_interrupt - It will check the state of the CAN device 1137 * @ndev: net_device pointer 1138 * @isr: interrupt status register value 1139 * 1140 * This will checks the state of the CAN device 1141 * and puts the device into appropriate state. 
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt; if set, put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv: Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (!(fsr & XCAN_FSR_FL_MASK))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part.
 * It will process packets up to the quota value.
1207 * 1208 * Return: number of packets received 1209 */ 1210 static int xcan_rx_poll(struct napi_struct *napi, int quota) 1211 { 1212 struct net_device *ndev = napi->dev; 1213 struct xcan_priv *priv = netdev_priv(ndev); 1214 u32 ier; 1215 int work_done = 0; 1216 int frame_offset; 1217 1218 while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && 1219 (work_done < quota)) { 1220 if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK) 1221 work_done += xcanfd_rx(ndev, frame_offset); 1222 else 1223 work_done += xcan_rx(ndev, frame_offset); 1224 1225 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) 1226 /* increment read index */ 1227 priv->write_reg(priv, XCAN_FSR_OFFSET, 1228 XCAN_FSR_IRI_MASK); 1229 else 1230 /* clear rx-not-empty (will actually clear only if 1231 * empty) 1232 */ 1233 priv->write_reg(priv, XCAN_ICR_OFFSET, 1234 XCAN_IXR_RXNEMP_MASK); 1235 } 1236 1237 if (work_done) { 1238 can_led_event(ndev, CAN_LED_EVENT_RX); 1239 xcan_update_error_state_after_rxtx(ndev); 1240 } 1241 1242 if (work_done < quota) { 1243 napi_complete_done(napi, work_done); 1244 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 1245 ier |= xcan_rx_int_mask(priv); 1246 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 1247 } 1248 return work_done; 1249 } 1250 1251 /** 1252 * xcan_tx_interrupt - Tx Done Isr 1253 * @ndev: net_device pointer 1254 * @isr: Interrupt status register value 1255 */ 1256 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) 1257 { 1258 struct xcan_priv *priv = netdev_priv(ndev); 1259 struct net_device_stats *stats = &ndev->stats; 1260 unsigned int frames_in_fifo; 1261 int frames_sent = 1; /* TXOK => at least 1 frame was sent */ 1262 unsigned long flags; 1263 int retries = 0; 1264 1265 /* Synchronize with xmit as we need to know the exact number 1266 * of frames in the FIFO to stay in sync due to the TXFEMP 1267 * handling. 1268 * This also prevents a race between netif_wake_queue() and 1269 * netif_stop_queue(). 1270 */ 1271 spin_lock_irqsave(&priv->tx_lock, flags); 1272 1273 frames_in_fifo = priv->tx_head - priv->tx_tail; 1274 1275 if (WARN_ON_ONCE(frames_in_fifo == 0)) { 1276 /* clear TXOK anyway to avoid getting back here */ 1277 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 1278 spin_unlock_irqrestore(&priv->tx_lock, flags); 1279 return; 1280 } 1281 1282 /* Check if 2 frames were sent (TXOK only means that at least 1 1283 * frame was sent). 1284 */ 1285 if (frames_in_fifo > 1) { 1286 WARN_ON(frames_in_fifo > priv->tx_max); 1287 1288 /* Synchronize TXOK and isr so that after the loop: 1289 * (1) isr variable is up-to-date at least up to TXOK clear 1290 * time. This avoids us clearing a TXOK of a second frame 1291 * but not noticing that the FIFO is now empty and thus 1292 * marking only a single frame as sent. 1293 * (2) No TXOK is left. Having one could mean leaving a 1294 * stray TXOK as we might process the associated frame 1295 * via TXFEMP handling as we read TXFEMP *after* TXOK 1296 * clear to satisfy (1). 
1297 */ 1298 while ((isr & XCAN_IXR_TXOK_MASK) && 1299 !WARN_ON(++retries == 100)) { 1300 priv->write_reg(priv, XCAN_ICR_OFFSET, 1301 XCAN_IXR_TXOK_MASK); 1302 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 1303 } 1304 1305 if (isr & XCAN_IXR_TXFEMP_MASK) { 1306 /* nothing in FIFO anymore */ 1307 frames_sent = frames_in_fifo; 1308 } 1309 } else { 1310 /* single frame in fifo, just clear TXOK */ 1311 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 1312 } 1313 1314 while (frames_sent--) { 1315 stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % 1316 priv->tx_max); 1317 priv->tx_tail++; 1318 stats->tx_packets++; 1319 } 1320 1321 netif_wake_queue(ndev); 1322 1323 spin_unlock_irqrestore(&priv->tx_lock, flags); 1324 1325 can_led_event(ndev, CAN_LED_EVENT_TX); 1326 xcan_update_error_state_after_rxtx(ndev); 1327 } 1328 1329 /** 1330 * xcan_interrupt - CAN Isr 1331 * @irq: irq number 1332 * @dev_id: device id poniter 1333 * 1334 * This is the xilinx CAN Isr. It checks for the type of interrupt 1335 * and invokes the corresponding ISR. 1336 * 1337 * Return: 1338 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise 1339 */ 1340 static irqreturn_t xcan_interrupt(int irq, void *dev_id) 1341 { 1342 struct net_device *ndev = (struct net_device *)dev_id; 1343 struct xcan_priv *priv = netdev_priv(ndev); 1344 u32 isr, ier; 1345 u32 isr_errors; 1346 u32 rx_int_mask = xcan_rx_int_mask(priv); 1347 1348 /* Get the interrupt status from Xilinx CAN */ 1349 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 1350 if (!isr) 1351 return IRQ_NONE; 1352 1353 /* Check for the type of interrupt and Processing it */ 1354 if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) { 1355 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK | 1356 XCAN_IXR_WKUP_MASK)); 1357 xcan_state_interrupt(ndev, isr); 1358 } 1359 1360 /* Check for Tx interrupt and Processing it */ 1361 if (isr & XCAN_IXR_TXOK_MASK) 1362 xcan_tx_interrupt(ndev, isr); 1363 1364 /* Check for the type of error interrupt and Processing it */ 1365 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 1366 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | 1367 XCAN_IXR_RXMNF_MASK); 1368 if (isr_errors) { 1369 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); 1370 xcan_err_interrupt(ndev, isr); 1371 } 1372 1373 /* Check for the type of receive interrupt and Processing it */ 1374 if (isr & rx_int_mask) { 1375 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 1376 ier &= ~rx_int_mask; 1377 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 1378 napi_schedule(&priv->napi); 1379 } 1380 return IRQ_HANDLED; 1381 } 1382 1383 /** 1384 * xcan_chip_stop - Driver stop routine 1385 * @ndev: Pointer to net_device structure 1386 * 1387 * This is the drivers stop routine. It will disable the 1388 * interrupts and put the device into configuration mode. 1389 */ 1390 static void xcan_chip_stop(struct net_device *ndev) 1391 { 1392 struct xcan_priv *priv = netdev_priv(ndev); 1393 1394 /* Disable interrupts and leave the can in configuration mode */ 1395 set_reset_mode(ndev); 1396 priv->can.state = CAN_STATE_STOPPED; 1397 } 1398 1399 /** 1400 * xcan_open - Driver open routine 1401 * @ndev: Pointer to net_device structure 1402 * 1403 * This is the driver open routine. 
1404 * Return: 0 on success and failure value on error 1405 */ 1406 static int xcan_open(struct net_device *ndev) 1407 { 1408 struct xcan_priv *priv = netdev_priv(ndev); 1409 int ret; 1410 1411 ret = pm_runtime_get_sync(priv->dev); 1412 if (ret < 0) { 1413 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", 1414 __func__, ret); 1415 return ret; 1416 } 1417 1418 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, 1419 ndev->name, ndev); 1420 if (ret < 0) { 1421 netdev_err(ndev, "irq allocation for CAN failed\n"); 1422 goto err; 1423 } 1424 1425 /* Set chip into reset mode */ 1426 ret = set_reset_mode(ndev); 1427 if (ret < 0) { 1428 netdev_err(ndev, "mode resetting failed!\n"); 1429 goto err_irq; 1430 } 1431 1432 /* Common open */ 1433 ret = open_candev(ndev); 1434 if (ret) 1435 goto err_irq; 1436 1437 ret = xcan_chip_start(ndev); 1438 if (ret < 0) { 1439 netdev_err(ndev, "xcan_chip_start failed!\n"); 1440 goto err_candev; 1441 } 1442 1443 can_led_event(ndev, CAN_LED_EVENT_OPEN); 1444 napi_enable(&priv->napi); 1445 netif_start_queue(ndev); 1446 1447 return 0; 1448 1449 err_candev: 1450 close_candev(ndev); 1451 err_irq: 1452 free_irq(ndev->irq, ndev); 1453 err: 1454 pm_runtime_put(priv->dev); 1455 1456 return ret; 1457 } 1458 1459 /** 1460 * xcan_close - Driver close routine 1461 * @ndev: Pointer to net_device structure 1462 * 1463 * Return: 0 always 1464 */ 1465 static int xcan_close(struct net_device *ndev) 1466 { 1467 struct xcan_priv *priv = netdev_priv(ndev); 1468 1469 netif_stop_queue(ndev); 1470 napi_disable(&priv->napi); 1471 xcan_chip_stop(ndev); 1472 free_irq(ndev->irq, ndev); 1473 close_candev(ndev); 1474 1475 can_led_event(ndev, CAN_LED_EVENT_STOP); 1476 pm_runtime_put(priv->dev); 1477 1478 return 0; 1479 } 1480 1481 /** 1482 * xcan_get_berr_counter - error counter routine 1483 * @ndev: Pointer to net_device structure 1484 * @bec: Pointer to can_berr_counter structure 1485 * 1486 * This is the driver error counter routine. 1487 * Return: 0 on success and failure value on error 1488 */ 1489 static int xcan_get_berr_counter(const struct net_device *ndev, 1490 struct can_berr_counter *bec) 1491 { 1492 struct xcan_priv *priv = netdev_priv(ndev); 1493 int ret; 1494 1495 ret = pm_runtime_get_sync(priv->dev); 1496 if (ret < 0) { 1497 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", 1498 __func__, ret); 1499 return ret; 1500 } 1501 1502 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; 1503 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & 1504 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); 1505 1506 pm_runtime_put(priv->dev); 1507 1508 return 0; 1509 } 1510 1511 static const struct net_device_ops xcan_netdev_ops = { 1512 .ndo_open = xcan_open, 1513 .ndo_stop = xcan_close, 1514 .ndo_start_xmit = xcan_start_xmit, 1515 .ndo_change_mtu = can_change_mtu, 1516 }; 1517 1518 /** 1519 * xcan_suspend - Suspend method for the driver 1520 * @dev: Address of the device structure 1521 * 1522 * Put the driver into low power mode. 1523 * Return: 0 on success and failure value on error 1524 */ 1525 static int __maybe_unused xcan_suspend(struct device *dev) 1526 { 1527 struct net_device *ndev = dev_get_drvdata(dev); 1528 1529 if (netif_running(ndev)) { 1530 netif_stop_queue(ndev); 1531 netif_device_detach(ndev); 1532 xcan_chip_stop(ndev); 1533 } 1534 1535 return pm_runtime_force_suspend(dev); 1536 } 1537 1538 /** 1539 * xcan_resume - Resume from suspend 1540 * @dev: Address of the device structure 1541 * 1542 * Resume operation after suspend. 
1543 * Return: 0 on success and failure value on error 1544 */ 1545 static int __maybe_unused xcan_resume(struct device *dev) 1546 { 1547 struct net_device *ndev = dev_get_drvdata(dev); 1548 int ret; 1549 1550 ret = pm_runtime_force_resume(dev); 1551 if (ret) { 1552 dev_err(dev, "pm_runtime_force_resume failed on resume\n"); 1553 return ret; 1554 } 1555 1556 if (netif_running(ndev)) { 1557 ret = xcan_chip_start(ndev); 1558 if (ret) { 1559 dev_err(dev, "xcan_chip_start failed on resume\n"); 1560 return ret; 1561 } 1562 1563 netif_device_attach(ndev); 1564 netif_start_queue(ndev); 1565 } 1566 1567 return 0; 1568 } 1569 1570 /** 1571 * xcan_runtime_suspend - Runtime suspend method for the driver 1572 * @dev: Address of the device structure 1573 * 1574 * Put the driver into low power mode. 1575 * Return: 0 always 1576 */ 1577 static int __maybe_unused xcan_runtime_suspend(struct device *dev) 1578 { 1579 struct net_device *ndev = dev_get_drvdata(dev); 1580 struct xcan_priv *priv = netdev_priv(ndev); 1581 1582 clk_disable_unprepare(priv->bus_clk); 1583 clk_disable_unprepare(priv->can_clk); 1584 1585 return 0; 1586 } 1587 1588 /** 1589 * xcan_runtime_resume - Runtime resume from suspend 1590 * @dev: Address of the device structure 1591 * 1592 * Resume operation after suspend. 1593 * Return: 0 on success and failure value on error 1594 */ 1595 static int __maybe_unused xcan_runtime_resume(struct device *dev) 1596 { 1597 struct net_device *ndev = dev_get_drvdata(dev); 1598 struct xcan_priv *priv = netdev_priv(ndev); 1599 int ret; 1600 1601 ret = clk_prepare_enable(priv->bus_clk); 1602 if (ret) { 1603 dev_err(dev, "Cannot enable clock.\n"); 1604 return ret; 1605 } 1606 ret = clk_prepare_enable(priv->can_clk); 1607 if (ret) { 1608 dev_err(dev, "Cannot enable clock.\n"); 1609 clk_disable_unprepare(priv->bus_clk); 1610 return ret; 1611 } 1612 1613 return 0; 1614 } 1615 1616 static const struct dev_pm_ops xcan_dev_pm_ops = { 1617 SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume) 1618 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) 1619 }; 1620 1621 static const struct xcan_devtype_data xcan_zynq_data = { 1622 .cantype = XZYNQ_CANPS, 1623 .flags = XCAN_FLAG_TXFEMP, 1624 .bittiming_const = &xcan_bittiming_const, 1625 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, 1626 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, 1627 .bus_clk_name = "pclk", 1628 }; 1629 1630 static const struct xcan_devtype_data xcan_axi_data = { 1631 .cantype = XAXI_CAN, 1632 .flags = XCAN_FLAG_TXFEMP, 1633 .bittiming_const = &xcan_bittiming_const, 1634 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, 1635 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, 1636 .bus_clk_name = "s_axi_aclk", 1637 }; 1638 1639 static const struct xcan_devtype_data xcan_canfd_data = { 1640 .cantype = XAXI_CANFD, 1641 .flags = XCAN_FLAG_EXT_FILTERS | 1642 XCAN_FLAG_RXMNF | 1643 XCAN_FLAG_TX_MAILBOXES | 1644 XCAN_FLAG_RX_FIFO_MULTI, 1645 .bittiming_const = &xcan_bittiming_const_canfd, 1646 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, 1647 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, 1648 .bus_clk_name = "s_axi_aclk", 1649 }; 1650 1651 static const struct xcan_devtype_data xcan_canfd2_data = { 1652 .cantype = XAXI_CANFD_2_0, 1653 .flags = XCAN_FLAG_EXT_FILTERS | 1654 XCAN_FLAG_RXMNF | 1655 XCAN_FLAG_TX_MAILBOXES | 1656 XCAN_FLAG_CANFD_2 | 1657 XCAN_FLAG_RX_FIFO_MULTI, 1658 .bittiming_const = &xcan_bittiming_const_canfd2, 1659 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, 1660 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, 1661 .bus_clk_name = "s_axi_aclk", 1662 }; 1663 1664 /* Match table for 
OF platform binding */ 1665 static const struct of_device_id xcan_of_match[] = { 1666 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, 1667 { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, 1668 { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data }, 1669 { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data }, 1670 { /* end of list */ }, 1671 }; 1672 MODULE_DEVICE_TABLE(of, xcan_of_match); 1673 1674 /** 1675 * xcan_probe - Platform registration call 1676 * @pdev: Handle to the platform device structure 1677 * 1678 * This function does all the memory allocation and registration for the CAN 1679 * device. 1680 * 1681 * Return: 0 on success and failure value on error 1682 */ 1683 static int xcan_probe(struct platform_device *pdev) 1684 { 1685 struct resource *res; /* IO mem resources */ 1686 struct net_device *ndev; 1687 struct xcan_priv *priv; 1688 const struct of_device_id *of_id; 1689 const struct xcan_devtype_data *devtype = &xcan_axi_data; 1690 void __iomem *addr; 1691 int ret; 1692 int rx_max, tx_max; 1693 int hw_tx_max, hw_rx_max; 1694 const char *hw_tx_max_property; 1695 1696 /* Get the virtual base address for the device */ 1697 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1698 addr = devm_ioremap_resource(&pdev->dev, res); 1699 if (IS_ERR(addr)) { 1700 ret = PTR_ERR(addr); 1701 goto err; 1702 } 1703 1704 of_id = of_match_device(xcan_of_match, &pdev->dev); 1705 if (of_id && of_id->data) 1706 devtype = of_id->data; 1707 1708 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ? 1709 "tx-mailbox-count" : "tx-fifo-depth"; 1710 1711 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property, 1712 &hw_tx_max); 1713 if (ret < 0) { 1714 dev_err(&pdev->dev, "missing %s property\n", 1715 hw_tx_max_property); 1716 goto err; 1717 } 1718 1719 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", 1720 &hw_rx_max); 1721 if (ret < 0) { 1722 dev_err(&pdev->dev, 1723 "missing rx-fifo-depth property (mailbox mode is not supported)\n"); 1724 goto err; 1725 } 1726 1727 /* With TX FIFO: 1728 * 1729 * There is no way to directly figure out how many frames have been 1730 * sent when the TXOK interrupt is processed. If TXFEMP 1731 * is supported, we can have 2 frames in the FIFO and use TXFEMP 1732 * to determine if 1 or 2 frames have been sent. 1733 * Theoretically we should be able to use TXFWMEMP to determine up 1734 * to 3 frames, but it seems that after putting a second frame in the 1735 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less 1736 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was 1737 * sent), which is not a sensible state - possibly TXFWMEMP is not 1738 * completely synchronized with the rest of the bits? 1739 * 1740 * With TX mailboxes: 1741 * 1742 * HW sends frames in CAN ID priority order. To preserve FIFO ordering 1743 * we submit frames one at a time. 
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;

	if (devtype->cantype == XAXI_CANFD_2_0)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		dev_err(&pdev->dev, "bus clock not found\n");
		ret = PTR_ERR(priv->bus_clk);
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_pmdisable;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "register_candev() failed (err=%d)\n", ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
err_pmdisable:
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}
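
/* Illustrative device tree node for a Zynq CANPS instance (address,
 * interrupt and clock specifiers are board-specific examples, not
 * requirements of this driver):
 *
 *	can0: can@e0008000 {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <0xe0008000 0x1000>;
 *		interrupts = <0 28 4>;
 *		clocks = <&clkc 19>, <&clkc 36>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 *
 * xcan_probe() above looks up the clocks by these names and reads
 * "tx-fifo-depth" (or "tx-mailbox-count" on the mailbox-based CAN FD
 * cores) and "rx-fifo-depth" to size the queues.
 */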

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");