// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescaler
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};

#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0
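
/* Illustrative example of the layout above (numbers derived from the
 * macros, not from a datasheet): on a CAN FD core, the DLC word of RX
 * buffer 5 in the second RX message space lives at
 * XCAN_FRAME_DLC_OFFSET(XCAN_RXMSG_2_FRAME_OFFSET(5))
 *   = 0x2100 + 0x48 * 5 + 0x04 = 0x226C.
 */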

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)
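
/* Worked example based on the mask/shift values above (not from HW docs):
 * a classic CAN frame carrying 8 data bytes is encoded in the DLC register
 * as 8 << XCAN_DLCR_DLC_SHIFT = 0x80000000; a CAN FD frame with bit rate
 * switching additionally sets XCAN_DLCR_EDL_MASK | XCAN_DLCR_BRS_MASK.
 */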

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF		0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can: CAN private data structure.
 * @tx_lock: Lock for synchronizing TX interrupt handling
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number of packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 * @devtype: Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};
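
/* Usage sketch (illustrative, assuming the netdev comes up as can0): the
 * limits above are what the CAN core hands to the kernel bittiming
 * calculation when userspace configures the interface, e.g. with iproute2:
 *
 *   ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on
 *   ip link set can0 up
 *
 * "dbitrate"/"fd on" only apply to the CAN FD capable cores that are
 * registered with CAN_CTRLMODE_FD further below.
 */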

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv: Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
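
/* Worked example of the register math above (hypothetical clocking, not
 * from a specific board): with can_clk = 24 MHz and a 500 kbit/s nominal
 * rate the CAN core might pick brp = 3, prop_seg + phase_seg1 = 10,
 * phase_seg2 = 5, sjw = 1, i.e. 16 time quanta per bit and
 * 24 MHz / 3 / 16 = 500 kbit/s.  With the Zynq shifts (TS2 at bit 4,
 * SJW at bit 7) that becomes BRPR = 0x02 and
 * BTR = (0 << 7) | (4 << 4) | 9 = 0x49.
 */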

/**
 * xcan_chip_start - This is the driver start routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the State of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr;
	int err;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts
	 *
	 * We enable the ERROR interrupt even with
	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
	 * dedicated interrupt for a state change to
	 * ERROR_WARNING/ERROR_PASSIVE.
	 */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
	      XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
	      XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
	      XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_msr = XCAN_MSR_LBACK_MASK;
	else
		reg_msr = 0x0;

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev: Pointer to net_device structure
 * @mode: Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding mode-setting routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @ndev: Pointer to net_device structure
 * @skb: sk_buff pointer that contains data to be Txed
 * @frame_offset: Register offset to write the frame to
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
		     XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
		       XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
		     XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
	else
		can_put_echo_skb(skb, ndev, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
				 (dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}
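
/* Worked example of the ID conversion above (values computed from the
 * masks/shifts in this file, not from a bus capture): a SocketCAN can_id
 * of 0x123 (standard frame) becomes IDR = 0x123 << 21 = 0x24600000, while
 * an extended can_id of 0x1ABCDEF2 | CAN_EFF_FLAG becomes
 * IDR = (0x6AF << 21) | IDE | SRR | ((0x1ABCDEF2 & 0x3FFFF) << 1).
 */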

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
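
/* Accounting sketch for the FIFO path above (derived from the code, no HW
 * assumption): with tx_max = 2, queuing two frames makes
 * tx_head - tx_tail == 2 and stops the netif queue; each TXOK handled in
 * xcan_tx_interrupt() advances tx_tail and wakes the queue again, so at
 * most tx_max echo skbs are ever outstanding.
 */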

/**
 * xcan_start_xmit - Starts the transmission
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev: Pointer to net_device structure
 * @frame_base: Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
	      XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
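
/* Note on the data words handled above (observation about this driver's
 * register layout, not about the CAN bus itself): DW1/DW2 hold the payload
 * most-significant-byte first, so a received DW1 of 0x11223344 is stored
 * into cf->data[0..3] as 0x11, 0x22, 0x33, 0x44 by the cpu_to_be32()
 * stores, mirroring the be32_to_cpup() reads on the TX path.
 */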

/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *	       frame processing
 * @ndev: Pointer to net_device structure
 * @frame_base: Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
				      XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
				      XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check if the received frame is FD or not */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
				    (dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}
	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev: Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev: Pointer to net_device structure
 * @new_state: The new CAN state to be set
 * @cf: Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev: Pointer to net_device structure
 *
 * If the device is in an ERROR_WARNING or ERROR_PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}
	}
}
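
/* Worked example for xcan_set_error_state() (the register value is made up
 * for illustration): ECR = 0x00002005 decodes to rxerr = 0x20 (32) and
 * txerr = 0x05, so for a transition to ERROR_WARNING only rx_state is set
 * and can_change_state() attributes the warning to the RX side.
 */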

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			stats->rx_packets++;
			stats->rx_bytes += CAN_ERR_DLC;
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv: Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
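
/* Worked example for the multi-buffer path above (FSR value is invented
 * for illustration): on a CAN FD 2.0 core, FSR = 0x00000305 means a fill
 * level of 3 and a read index of 5, so the next frame is read from
 * XCAN_RXMSG_2_FRAME_OFFSET(5) = 0x2100 + 0x48 * 5 = 0x2268, and writing
 * XCAN_FSR_IRI_MASK afterwards advances the read index.
 */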

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process packets up to the quota value.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}
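
/* Scenario sketch for the TXOK/TXFEMP handling above (timing is
 * hypothetical): with two frames queued, a single TXOK may be observed
 * after both frames have already left the FIFO.  Clearing TXOK in a loop
 * and only then sampling TXFEMP lets the handler tell "one sent, one still
 * pending" (complete one echo skb) apart from "FIFO drained" (complete
 * both), without ever leaving a stale TXOK behind.
 */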

/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and process it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and process it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and process it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and process it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver's stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* Disable interrupts and leave the CAN core in configuration mode */
	ret = set_reset_mode(ndev);
	if (ret < 0)
		netdev_dbg(ndev, "set_reset_mode() Failed\n");

	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev: Pointer to net_device structure
 * @bec: Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		pm_runtime_put(priv->dev);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
		       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
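
/* Illustrative device tree node consumed by the match table above and by
 * xcan_probe() below (addresses, interrupt cells, clock phandles and FIFO
 * depths are placeholders, not taken from a real board):
 *
 *   can0: can@e0008000 {
 *           compatible = "xlnx,zynq-can-1.0";
 *           reg = <0xe0008000 0x1000>;
 *           interrupts = <0 28 4>;
 *           clocks = <&clkc 19>, <&clkc 36>;
 *           clock-names = "can_clk", "pclk";
 *           tx-fifo-depth = <0x40>;
 *           rx-fifo-depth = <0x40>;
 *   };
 *
 * CAN FD cores use "tx-mailbox-count" instead of "tx-fifo-depth" and the
 * "s_axi_aclk" bus clock name.
 */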

/**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;
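
	/* For example (derived from the devtype tables above, not measured):
	 * a Zynq CANPS node with tx-fifo-depth = <0x40> still ends up with
	 * tx_max = 2 because of the TXFEMP limitation, an AXI CAN core
	 * without TXFEMP gets tx_max = 1, and the CAN FD cores always use a
	 * single mailbox, i.e. tx_max = 1.
	 */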

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;

	if (devtype->cantype == XAXI_CANFD_2_0)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "bus clock not found\n");
		ret = PTR_ERR(priv->bus_clk);
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");