// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2014 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET = 0x00, /* Software reset */
	XCAN_MSR_OFFSET = 0x04, /* Mode select */
	XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET = 0x10, /* Error counter */
	XCAN_ESR_OFFSET = 0x14, /* Error status */
	XCAN_SR_OFFSET = 0x18, /* Status */
	XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
				     * Prescaler
				     */
	XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
};

#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX	0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK	0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK	0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK	0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK	0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK	0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK	0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK	0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK	0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD	0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD	0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD	0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK	0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK	0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK	0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK	0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK	0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK	0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK	0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK	0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK	0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK	0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK	0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK	0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK	0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK	0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK	0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK	0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK	0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK	0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK	0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK	0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK	0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK	0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK	0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK	0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK	0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK	0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK	0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK	0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK	0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK	0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK	0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK	0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK	0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK	0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK	0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK	0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK	0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK	0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN	8
#define XCANFD_DW_BYTES		4
#define XCAN_TIMEOUT		(1 * HZ)

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF	0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can:	CAN private data structure.
 * @tx_lock:	Lock for synchronizing TX interrupt handling
 * @tx_head:	Tx CAN packets ready to send on the queue
 * @tx_tail:	Tx CAN packets successfully sent on the queue
 * @tx_max:	Maximum number of packets the driver can send
 * @napi:	NAPI structure
 * @read_reg:	For reading data from CAN registers
 * @write_reg:	For writing data to CAN registers
 * @dev:	Network device data structure
 * @reg_base:	Ioremapped address to registers
 * @irq_flags:	For request_irq()
 * @bus_clk:	Pointer to struct clk
 * @can_clk:	Pointer to struct clk
 * @devtype:	Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv:	Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}

/**
 * xcan_chip_start - This is the driver's start routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr;
	int err;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		reg_msr = XCAN_MSR_LBACK_MASK;
	} else {
		reg_msr = 0x0;
	}

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This checks the driver state and calls the
 * corresponding routine to set the mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @priv:		Driver private data structure
 * @skb:		sk_buff pointer that contains data to be Txed
 * @frame_offset:	Register offset to write the frame to
 */
static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, 0);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
	      XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *	       frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
				      XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
				      XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check whether the received frame is a CAN FD frame */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}
	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev:	Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}
	}
}

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 err_status;

	skb = alloc_can_err_skb(ndev, &cf);

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		}
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
		}
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		if (skb)
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_ACK;
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (skb) {
				cf->can_id |= CAN_ERR_PROT;
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This will check the state of the CAN device
 * and put the device into the appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
				XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
				XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process at most quota number of packets.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}

/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and process it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and process it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and process it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and process it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver's stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Disable interrupts and leave the can in configuration mode */
	set_reset_mode(ndev);
	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev:	Pointer to net_device structure
 * @bec:	Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
		       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

/**
 * xcan_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct resource *res; /* IO mem resources */
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	int hw_tx_max, hw_rx_max;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;

	if (devtype->cantype == XAXI_CANFD_2_0)
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Device clock not found.\n");
		ret = PTR_ERR(priv->can_clk);
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		dev_err(&pdev->dev, "bus clock not found\n");
		ret = PTR_ERR(priv->bus_clk);
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_pmdisable;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	devm_can_led_init(ndev);

	pm_runtime_put(&pdev->dev);

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
err_pmdisable:
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");