// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescaler
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};

#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF		0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can:	CAN private data structure.
 * @tx_lock:	Lock for synchronizing TX interrupt handling
 * @tx_head:	Tx CAN packets ready to send on the queue
 * @tx_tail:	Tx CAN packets successfully sent on the queue
 * @tx_max:	Maximum number of packets the driver can send
 * @napi:	NAPI structure
 * @read_reg:	For reading data from CAN registers
 * @write_reg:	For writing data to CAN registers
 * @dev:	Network device data structure
 * @reg_base:	Ioremapped address to registers
 * @irq_flags:	For request_irq()
 * @bus_clk:	Pointer to struct clk
 * @can_clk:	Pointer to struct clk
 * @devtype:	Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv:	Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}
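
/* Illustrative example of the register values computed below (the bit
 * timing numbers are made up, not taken from any datasheet): for a
 * hypothetical nominal timing of brp = 8, prop_seg + phase_seg1 = 12 tq,
 * phase_seg2 = 3 tq and sjw = 1 on a Zynq core (TS2 shift 4, SJW shift 7),
 * xcan_set_bittiming() would program
 *
 *	BRPR = brp - 1                                    = 7
 *	BTR  = (12 - 1) | ((3 - 1) << 4) | ((1 - 1) << 7) = 0x2B
 *
 * CAN FD cores use the same formula with the wider _CANFD shifts and, in
 * addition, program F_BRPR/F_BTR from the data-phase bit timing.
 */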

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}

/**
 * xcan_chip_start - This is the driver start routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver start routine.
 * Based on the State of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr;
	int err;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts
	 *
	 * We enable the ERROR interrupt even with
	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
	 * dedicated interrupt for a state change to
	 * ERROR_WARNING/ERROR_PASSIVE.
	 */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_msr = XCAN_MSR_LBACK_MASK;
	else
		reg_msr = 0x0;

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	netdev_dbg(ndev, "status: 0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This checks the driver state and calls the corresponding modes to set.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @ndev:		Pointer to net_device structure
 * @skb:		sk_buff pointer that contains data to be Txed
 * @frame_offset:	Register offset to write the frame to
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}
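
	/* On TX-FIFO cores that also have the TX-FIFO-empty interrupt up to
	 * tx_max (i.e. two) frames may be in flight, so the echo skb slot
	 * follows tx_head modulo tx_max; on all other cores only a single
	 * frame is outstanding at a time and slot 0 is always used.
	 */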
	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
	else
		can_put_echo_skb(skb, ndev, 0, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
		XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->len = can_cc_dlc2len(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->len > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->len > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
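
/* Worked example of the IDR-to-can_id mapping used by xcan_rx() above and
 * xcanfd_rx() below (the frame itself is hypothetical): a standard frame
 * with CAN ID 0x123 is stored by the core as
 *
 *	id_xcan = 0x123 << XCAN_IDR_ID1_SHIFT = 0x24600000
 *
 * and masking with XCAN_IDR_ID1_MASK and shifting back by 21 recovers 0x123.
 * Extended IDs are split: the upper 11 bits live in the ID1 field and the
 * lower 18 bits in the ID2 field, with the IDE bit set; the remote-request
 * flag is carried in RTR for extended frames and in SRR for standard frames.
 */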

/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *	       frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check whether the received frame is a CAN FD frame */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}
	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev:	Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}
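
/* Illustrative example (the counter values are made up): if the status
 * register reports ERROR_PASSIVE and the error counter register reads
 * ECR = 0x208C, then rxerr = 0x20 (32) and txerr = 0x8C (140). Since
 * txerr > rxerr, xcan_set_error_state() below passes tx_state =
 * ERROR_PASSIVE and rx_state = 0 to can_change_state(), attributing the
 * state change to the transmit side, and reports both counters in
 * data[6]/data[7] of the error frame.
 */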

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in an ERROR_WARNING or ERROR_PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->len;
			netif_rx(skb);
		}
	}
}
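
/* Example of what xcan_err_interrupt() below reports for a single bus error
 * when CAN_CTRLMODE_BERR_REPORTING is enabled (the scenario is illustrative):
 * a stuff error latched in ESR yields an error frame with
 * CAN_ERR_PROT | CAN_ERR_BUSERROR set in can_id and CAN_ERR_PROT_STUFF in
 * data[2], while rx_errors and the bus_error statistic are incremented.
 * Without berr reporting only the statistics are updated for that error.
 */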

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			stats->rx_packets++;
			stats->rx_bytes += CAN_ERR_DLC;
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
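
/* Illustrative example of the offset computation above (the read index is
 * made up): on a CAN FD 2.0 core with FSR read index RI = 3, the next frame
 * is read from XCAN_RXMSG_2_BASE_OFFSET + 3 * XCAN_CANFD_FRAME_SIZE =
 * 0x2100 + 3 * 0x48 = 0x21D8. Classic FIFO cores always read from the
 * fixed XCAN_RXFIFO_OFFSET instead.
 */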

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process packets up to the maximum quota value.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max, NULL);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}

/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the Xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* Disable interrupts and leave the can in configuration mode */
	ret = set_reset_mode(ndev);
	if (ret < 0)
		netdev_dbg(ndev, "set_reset_mode() failed\n");

	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev:	Pointer to net_device structure
 * @bec:	Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		pm_runtime_put(priv->dev);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
		       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
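
/* Rough sketch of a matching device tree node for the Zynq variant; the
 * unit address, interrupt specifier and clock phandles below are
 * placeholders, while the compatible string, the clock names and the
 * tx-fifo-depth/rx-fifo-depth (or tx-mailbox-count) properties are what
 * xcan_probe() actually consumes:
 *
 *	can@e0008000 {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <0xe0008000 0x1000>;
 *		interrupts = <0 28 4>;
 *		clocks = <&clkc 19>, <&clkc 36>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 */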

/**
 * xcan_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;

	if (devtype->cantype == XAXI_CANFD_2_0)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
				    "device clock not found\n");
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
				    "bus clock not found\n");
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");