// SPDX-License-Identifier: GPL-2.0
//
// bxcan.c - STM32 Basic Extended CAN controller driver
//
// Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com>
//
// NOTE: The ST documentation uses the terms master/slave instead of
// primary/secondary.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define BXCAN_NAPI_WEIGHT 3
#define BXCAN_TIMEOUT_US 10000

#define BXCAN_RX_MB_NUM 2
#define BXCAN_TX_MB_NUM 3

/* Primary control register (MCR) bits */
#define BXCAN_MCR_RESET BIT(15)
#define BXCAN_MCR_TTCM BIT(7)
#define BXCAN_MCR_ABOM BIT(6)
#define BXCAN_MCR_AWUM BIT(5)
#define BXCAN_MCR_NART BIT(4)
#define BXCAN_MCR_RFLM BIT(3)
#define BXCAN_MCR_TXFP BIT(2)
#define BXCAN_MCR_SLEEP BIT(1)
#define BXCAN_MCR_INRQ BIT(0)

/* Primary status register (MSR) bits */
#define BXCAN_MSR_ERRI BIT(2)
#define BXCAN_MSR_SLAK BIT(1)
#define BXCAN_MSR_INAK BIT(0)

/* Transmit status register (TSR) bits */
#define BXCAN_TSR_RQCP2 BIT(16)
#define BXCAN_TSR_RQCP1 BIT(8)
#define BXCAN_TSR_RQCP0 BIT(0)

/* Receive FIFO 0 register (RF0R) bits */
#define BXCAN_RF0R_RFOM0 BIT(5)
#define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)

/* Interrupt enable register (IER) bits */
#define BXCAN_IER_SLKIE BIT(17)
#define BXCAN_IER_WKUIE BIT(16)
#define BXCAN_IER_ERRIE BIT(15)
#define BXCAN_IER_LECIE BIT(11)
#define BXCAN_IER_BOFIE BIT(10)
#define BXCAN_IER_EPVIE BIT(9)
#define BXCAN_IER_EWGIE BIT(8)
#define BXCAN_IER_FOVIE1 BIT(6)
#define BXCAN_IER_FFIE1 BIT(5)
#define BXCAN_IER_FMPIE1 BIT(4)
#define BXCAN_IER_FOVIE0 BIT(3)
#define BXCAN_IER_FFIE0 BIT(2)
#define BXCAN_IER_FMPIE0 BIT(1)
#define BXCAN_IER_TMEIE BIT(0)

/* Error status register (ESR) bits */
#define BXCAN_ESR_REC_MASK GENMASK(31, 24)
#define BXCAN_ESR_TEC_MASK GENMASK(23, 16)
#define BXCAN_ESR_LEC_MASK GENMASK(6, 4)
#define BXCAN_ESR_BOFF BIT(2)
#define BXCAN_ESR_EPVF BIT(1)
#define BXCAN_ESR_EWGF BIT(0)

/* Bit timing register (BTR) bits */
#define BXCAN_BTR_SILM BIT(31)
#define BXCAN_BTR_LBKM BIT(30)
#define BXCAN_BTR_SJW_MASK GENMASK(25, 24)
#define BXCAN_BTR_TS2_MASK GENMASK(22, 20)
#define BXCAN_BTR_TS1_MASK GENMASK(19, 16)
#define BXCAN_BTR_BRP_MASK GENMASK(9, 0)

/* TX mailbox identifier register (TIxR, x = 0..2) bits */
#define BXCAN_TIxR_STID_MASK GENMASK(31, 21)
#define BXCAN_TIxR_EXID_MASK GENMASK(31, 3)
#define BXCAN_TIxR_IDE BIT(2)
#define BXCAN_TIxR_RTR BIT(1)
#define BXCAN_TIxR_TXRQ BIT(0)

/* TX mailbox data length and time stamp register (TDTxR, x = 0..2) bits */
#define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)

/* RX FIFO mailbox identifier register (RIxR, x = 0..1) bits */
#define BXCAN_RIxR_STID_MASK GENMASK(31, 21)
#define BXCAN_RIxR_EXID_MASK GENMASK(31, 3)
#define BXCAN_RIxR_IDE BIT(2)
#define BXCAN_RIxR_RTR BIT(1)

/* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */
#define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16)
#define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)

/* Offsets of the acceptance-filter registers in the shared "gcan" block,
 * accessed through a syscon regmap (see priv->gcan).
 */
#define BXCAN_FMR_REG 0x00
#define BXCAN_FM1R_REG 0x04
#define BXCAN_FS1R_REG 0x0c
#define BXCAN_FFA1R_REG 0x14
#define BXCAN_FA1R_REG 0x1c
#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)

/* Filter bank 0 belongs to the primary controller, bank 14 to the secondary */
#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)

/* Filter primary register (FMR) bits */
#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
#define BXCAN_FMR_FINIT BIT(0)

/* Last error codes as reported in the ESR.LEC field */
enum bxcan_lec_code {
	BXCAN_LEC_NO_ERROR = 0,
	BXCAN_LEC_STUFF_ERROR,
	BXCAN_LEC_FORM_ERROR,
	BXCAN_LEC_ACK_ERROR,
	BXCAN_LEC_BIT1_ERROR,
	BXCAN_LEC_BIT0_ERROR,
	BXCAN_LEC_CRC_ERROR,
	BXCAN_LEC_UNUSED
};

/* Structure of the message buffer */
struct bxcan_mb {
	u32 id;			/* can identifier */
	u32 dlc;		/* data length control and timestamp */
	u32 data[2];		/* data */
};

/* Structure of the hardware registers */
struct bxcan_regs {
	u32 mcr;			/* 0x00 - primary control */
	u32 msr;			/* 0x04 - primary status */
	u32 tsr;			/* 0x08 - transmit status */
	u32 rf0r;			/* 0x0c - FIFO 0 */
	u32 rf1r;			/* 0x10 - FIFO 1 */
	u32 ier;			/* 0x14 - interrupt enable */
	u32 esr;			/* 0x18 - error status */
	u32 btr;			/* 0x1c - bit timing */
	u32 reserved0[88];		/* 0x20 */
	struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM];	/* 0x180 - tx mailbox */
	struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM];	/* 0x1b0 - rx mailbox */
};

/* Per-controller private state; must start with struct can_priv */
struct bxcan_priv {
	struct can_priv can;
	struct can_rx_offload offload;
	struct device *dev;
	struct net_device *ndev;

	struct bxcan_regs __iomem *regs;
	struct regmap *gcan;	/* shared filter block (syscon) */
	int tx_irq;
	int sce_irq;
	bool primary;		/* true for CAN1, false for CAN2 */
	struct clk *clk;
	spinlock_t rmw_lock;	/* lock for read-modify-write operations */
	unsigned int tx_head;	/* free-running producer index, see bxcan_get_tx_head() */
	unsigned int tx_tail;	/* free-running consumer index, see bxcan_get_tx_tail() */
	u32 timestamp;		/* timestamp of the last received frame */
};

static const struct can_bittiming_const bxcan_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

/* Locked read-modify-write of a controller register; only writes back
 * if the value actually changed.
 */
static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
			     u32 clear, u32 set)
{
	unsigned long flags;
	u32 old, val;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	old = readl(addr);
	val = (old & ~clear) | set;
	if (val != old)
		writel(val, addr);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

/* Deactivate the filter bank owned by this controller instance. */
static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
{
	unsigned int fid = BXCAN_FILTER_ID(primary);
	u32 fmask = BIT(fid);

	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
}

/* Configure the shared acceptance filters so this controller receives
 * all messages into FIFO 0.
 */
static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
{
	unsigned int fid = BXCAN_FILTER_ID(primary);
	u32 fmask = BIT(fid);

	/* Filter settings:
	 *
	 * Accept all messages.
	 * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier
	 * mask mode with 32 bits width.
	 */

	/* Enter filter initialization mode and assign filters to CAN
	 * controllers.
	 */
	regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
			   BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT,
			   FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) |
			   BXCAN_FMR_FINIT);

	/* Deactivate filter */
	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);

	/* Two 32-bit registers in identifier mask mode */
	regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0);

	/* Single 32-bit scale configuration */
	regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask);

	/* Assign filter to FIFO 0 */
	regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0);

	/* Accept all messages */
	regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0);
	regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0);

	/* Activate filter */
	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask);

	/* Exit filter initialization mode */
	regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0);
}

/* Map the free-running head index onto a hardware TX mailbox number. */
static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv)
{
	return priv->tx_head % BXCAN_TX_MB_NUM;
}

/* Map the free-running tail index onto a hardware TX mailbox number. */
static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv)
{
	return priv->tx_tail % BXCAN_TX_MB_NUM;
}

/* Number of currently unused TX mailboxes. */
static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv)
{
	return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail);
}

/* Returns true (and stops the tx queue) if all TX mailboxes are in use.
 * The barrier orders the queue-stop against the re-check of tx_free so a
 * concurrent bxcan_tx_isr() cannot be missed.
 */
static bool bxcan_tx_busy(const struct bxcan_priv *priv)
{
	if (bxcan_get_tx_free(priv) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (bxcan_get_tx_free(priv) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   priv->tx_head, priv->tx_tail,
			   priv->tx_head - priv->tx_tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}

/* Request a software reset and wait until the chip signals sleep mode
 * (SLAK), which it enters after reset.
 */
static int bxcan_chip_softreset(struct bxcan_priv *priv)
{
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 value;

	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET);
	return readx_poll_timeout(readl, &regs->msr, value,
				  value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
				  USEC_PER_SEC);
}

/* Request initialization mode and wait for the INAK acknowledge. */
static int bxcan_enter_init_mode(struct bxcan_priv *priv)
{
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 value;

	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ);
	return readx_poll_timeout(readl, &regs->msr, value,
				  value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US,
				  USEC_PER_SEC);
}

/* Leave initialization mode and wait for INAK to clear. */
static int bxcan_leave_init_mode(struct bxcan_priv *priv)
{
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 value;

	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0);
	return readx_poll_timeout(readl, &regs->msr, value,
				  !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US,
				  USEC_PER_SEC);
}

/* Request sleep mode and wait for the SLAK acknowledge. */
static int bxcan_enter_sleep_mode(struct bxcan_priv *priv)
{
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 value;

	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP);
	return readx_poll_timeout(readl, &regs->msr, value,
				  value & BXCAN_MSR_SLAK,
				  BXCAN_TIMEOUT_US,
				  USEC_PER_SEC);
}

/* Leave sleep mode and wait for SLAK to clear. */
static int bxcan_leave_sleep_mode(struct bxcan_priv *priv)
{
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 value;

	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0);
	return readx_poll_timeout(readl, &regs->msr, value,
				  !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US,
				  USEC_PER_SEC);
}

static inline
struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct bxcan_priv, offload);
}

/* rx-offload callback: read one frame from RX FIFO 0 mailbox 0 and
 * release the FIFO entry.  Returns NULL when the FIFO is empty, an
 * ERR_PTR on drop/allocation failure, or the filled skb.  The hardware
 * timestamp of the frame is cached in priv->timestamp for later error
 * frames.
 */
static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload,
					  unsigned int mbxno, u32 *timestamp,
					  bool drop)
{
	struct bxcan_priv *priv = rx_offload_to_priv(offload);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0];
	struct sk_buff *skb = NULL;
	struct can_frame *cf;
	u32 rf0r, id, dlc;

	rf0r = readl(&regs->rf0r);
	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
		goto mark_as_read;

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	id = readl(&mb_regs->id);
	if (id & BXCAN_RIxR_IDE)
		cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG;
	else
		cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK;

	dlc = readl(&mb_regs->dlc);
	priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc);
	cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc));

	if (id & BXCAN_RIxR_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		int i, j;

		/* Payload is exposed as two 32-bit registers */
		for (i = 0, j = 0; i < cf->len; i += 4, j++)
			*(u32 *)(cf->data + i) = readl(&mb_regs->data[j]);
	}

 mark_as_read:
	/* Release the FIFO output mailbox so the next frame moves up */
	rf0r |= BXCAN_RF0R_RFOM0;
	writel(rf0r, &regs->rf0r);
	return skb;
}

/* RX interrupt: hand pending FIFO 0 frames to the rx-offload helper. */
static irqreturn_t bxcan_rx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 rf0r;

	rf0r = readl(&regs->rf0r);
	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
		return IRQ_NONE;

	can_rx_offload_irq_offload_fifo(&priv->offload);
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}

/* TX interrupt: reap completed mailboxes in order, update stats and echo
 * skbs, then restart the queue if space became available.
 */
static irqreturn_t bxcan_tx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct net_device_stats *stats = &ndev->stats;
	u32 tsr, rqcp_bit;
	int idx;

	tsr = readl(&regs->tsr);
	if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2)))
		return IRQ_NONE;

	while (priv->tx_head - priv->tx_tail > 0) {
		idx = bxcan_get_tx_tail(priv);
		/* RQCPx bits are 8 bits apart: BIT(0), BIT(8), BIT(16) */
		rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3);
		if (!(tsr & rqcp_bit))
			break;

		stats->tx_packets++;
		stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
		priv->tx_tail++;
	}

	/* Write the RQCPx bits back to clear them (write-1-to-clear) */
	writel(tsr, &regs->tsr);

	if (bxcan_get_tx_free(priv)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new priv->tx_tail.
		 */
		smp_mb();
		netif_wake_queue(ndev);
	}

	return IRQ_HANDLED;
}

/* Translate ESR warning/passive/bus-off flags into a CAN state change
 * and queue an error frame carrying the new counters.
 */
static void bxcan_handle_state_change(struct net_device *ndev, u32 esr)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	enum can_state new_state = priv->can.state;
	struct can_berr_counter bec;
	enum can_state rx_state, tx_state;
	struct sk_buff *skb;
	struct can_frame *cf;

	/* Early exit if no error flag is set */
	if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF)))
		return;

	bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
	bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);

	/* Check the most severe condition first */
	if (esr & BXCAN_ESR_BOFF)
		new_state = CAN_STATE_BUS_OFF;
	else if (esr & BXCAN_ESR_EPVF)
		new_state = CAN_STATE_ERROR_PASSIVE;
	else if (esr & BXCAN_ESR_EWGF)
		new_state = CAN_STATE_ERROR_WARNING;

	/* state hasn't changed */
	if (unlikely(new_state == priv->can.state))
		return;

	/* cf is NULL on allocation failure; can_change_state() copes */
	skb = alloc_can_err_skb(ndev, &cf);

	tx_state = bec.txerr >= bec.rxerr ? new_state : 0;
	rx_state = bec.txerr <= bec.rxerr ? new_state : 0;
	can_change_state(ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		can_bus_off(ndev);
	} else if (skb) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	if (skb) {
		int err;

		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
						     priv->timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}
}

/* Translate the ESR last-error-code into stats and an error frame for
 * berr-reporting mode.
 */
static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	enum bxcan_lec_code lec_code;
	struct can_frame *cf;
	struct sk_buff *skb;

	lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr);

	/* Early exit if no lec update or no error.
	 * No lec update means that no CAN bus event has been detected
	 * since CPU wrote BXCAN_LEC_UNUSED value to status reg.
	 */
	if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR)
		return;

	/* Common for all type of bus errors */
	priv->can.can_stats.bus_error++;

	/* Propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(ndev, &cf);
	if (skb)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_code) {
	case BXCAN_LEC_STUFF_ERROR:
		netdev_dbg(ndev, "Stuff error\n");
		ndev->stats.rx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;

	case BXCAN_LEC_FORM_ERROR:
		netdev_dbg(ndev, "Form error\n");
		ndev->stats.rx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		break;

	case BXCAN_LEC_ACK_ERROR:
		netdev_dbg(ndev, "Ack error\n");
		ndev->stats.tx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		}
		break;

	case BXCAN_LEC_BIT1_ERROR:
		netdev_dbg(ndev, "Bit error (recessive)\n");
		ndev->stats.tx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;

	case BXCAN_LEC_BIT0_ERROR:
		netdev_dbg(ndev, "Bit error (dominant)\n");
		ndev->stats.tx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;

	case BXCAN_LEC_CRC_ERROR:
		netdev_dbg(ndev, "CRC error\n");
		ndev->stats.rx_errors++;
		if (skb) {
			cf->data[2] |= CAN_ERR_PROT_BIT;
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		}
		break;

	default:
		break;
	}

	if (skb) {
		int err;

		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
						     priv->timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}
}

/* Status-change/error interrupt handler (sce irq). */
static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 msr, esr;

	msr = readl(&regs->msr);
	if (!(msr & BXCAN_MSR_ERRI))
		return IRQ_NONE;

	esr = readl(&regs->esr);
	bxcan_handle_state_change(ndev, esr);

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		bxcan_handle_bus_err(ndev, esr);

	/* Acknowledge the error interrupt (write-1-to-clear) */
	msr |= BXCAN_MSR_ERRI;
	writel(msr, &regs->msr);
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}

/* Reset and fully configure the controller (mode bits, bit timing,
 * filters, interrupt enables), leaving it in ERROR_ACTIVE state.
 */
static int bxcan_chip_start(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 clr, set;
	int err;

	err = bxcan_chip_softreset(priv);
	if (err) {
		netdev_err(ndev, "failed to reset chip, error %pe\n",
			   ERR_PTR(err));
		return err;
	}

	err = bxcan_leave_sleep_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to leave sleep mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_leave_sleep;
	}

	err = bxcan_enter_init_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to enter init mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_enter_init;
	}

	/* MCR
	 *
	 * select request order priority
	 * enable time triggered mode
	 * bus-off state left on sw request
	 * sleep mode left on sw request
	 * retransmit automatically on error
	 * do not lock RX FIFO on overrun
	 */
	bxcan_rmw(priv, &regs->mcr,
		  BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART |
		  BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP);

	/* Bit timing register settings */
	set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) |
	      FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 +
		         bt->prop_seg - 1) |
	      FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) |
	      FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1);

	/* loopback + silent mode put the controller in test mode,
	 * useful for hot self-test
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		set |= BXCAN_BTR_LBKM;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		set |= BXCAN_BTR_SILM;

	bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM |
		  BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
		  BXCAN_BTR_SJW_MASK, set);

	bxcan_enable_filters(priv, priv->primary);

	/* Clear all internal status */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	err = bxcan_leave_init_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to leave init mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_leave_init;
	}

	/* Set a `lec` value so that we can check for updates later */
	bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK,
		  FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED));

	/* IER
	 *
	 * Enable interrupt for:
	 * bus-off
	 * passive error
	 * warning error
	 * last error code
	 * RX FIFO pending message
	 * TX mailbox empty
	 */
	clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 |
	      BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
	      BXCAN_IER_FFIE0;
	set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE |
	      BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		set |= BXCAN_IER_LECIE;
	else
		clr |= BXCAN_IER_LECIE;

	bxcan_rmw(priv, &regs->ier, clr, set);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;

failed_leave_init:
failed_enter_init:
failed_leave_sleep:
	bxcan_chip_softreset(priv);
	return err;
}

/* ndo_open: enable clock, rx-offload and irqs, then start the chip. */
static int bxcan_open(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err) {
		netdev_err(ndev, "failed to enable clock, error %pe\n",
			   ERR_PTR(err));
		return err;
	}

	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed, error %pe\n",
			   ERR_PTR(err));
		goto out_disable_clock;
	}

	can_rx_offload_enable(&priv->offload);
	err = request_irq(ndev->irq, bxcan_rx_isr,
			  IRQF_SHARED, ndev->name,
			  ndev);
	if (err) {
		netdev_err(ndev, "failed to register rx irq(%d), error %pe\n",
			   ndev->irq, ERR_PTR(err));
		goto out_close_candev;
	}

	err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name,
			  ndev);
	if (err) {
		netdev_err(ndev, "failed to register tx irq(%d), error %pe\n",
			   priv->tx_irq, ERR_PTR(err));
		goto out_free_rx_irq;
	}

	err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED,
			  ndev->name, ndev);
	if (err) {
		netdev_err(ndev, "failed to register sce irq(%d), error %pe\n",
			   priv->sce_irq, ERR_PTR(err));
		goto out_free_tx_irq;
	}

	err = bxcan_chip_start(ndev);
	if (err)
		goto out_free_sce_irq;

	netif_start_queue(ndev);
	return 0;

out_free_sce_irq:
	free_irq(priv->sce_irq, ndev);
out_free_tx_irq:
	free_irq(priv->tx_irq, ndev);
out_free_rx_irq:
	free_irq(ndev->irq, ndev);
out_close_candev:
	can_rx_offload_disable(&priv->offload);
	close_candev(ndev);
out_disable_clock:
	clk_disable_unprepare(priv->clk);
	return err;
}

/* Disable all interrupts and filters and put the chip to sleep. */
static void bxcan_chip_stop(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;

	/* disable all interrupts */
	bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE |
		  BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE |
		  BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
		  BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
		  BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
	bxcan_disable_filters(priv, priv->primary);
	bxcan_enter_sleep_mode(priv);
	priv->can.state = CAN_STATE_STOPPED;
}

/* ndo_stop: reverse of bxcan_open(). */
static int bxcan_stop(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	bxcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	free_irq(priv->tx_irq,
ndev); 822 free_irq(priv->sce_irq, ndev); 823 can_rx_offload_disable(&priv->offload); 824 close_candev(ndev); 825 clk_disable_unprepare(priv->clk); 826 return 0; 827 } 828 829 static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb, 830 struct net_device *ndev) 831 { 832 struct bxcan_priv *priv = netdev_priv(ndev); 833 struct can_frame *cf = (struct can_frame *)skb->data; 834 struct bxcan_regs __iomem *regs = priv->regs; 835 struct bxcan_mb __iomem *mb_regs; 836 unsigned int idx; 837 u32 id; 838 int i, j; 839 840 if (can_dropped_invalid_skb(ndev, skb)) 841 return NETDEV_TX_OK; 842 843 if (bxcan_tx_busy(priv)) 844 return NETDEV_TX_BUSY; 845 846 idx = bxcan_get_tx_head(priv); 847 priv->tx_head++; 848 if (bxcan_get_tx_free(priv) == 0) 849 netif_stop_queue(ndev); 850 851 mb_regs = ®s->tx_mb[idx]; 852 if (cf->can_id & CAN_EFF_FLAG) 853 id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) | 854 BXCAN_TIxR_IDE; 855 else 856 id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id); 857 858 if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ 859 id |= BXCAN_TIxR_RTR; 860 } else { 861 for (i = 0, j = 0; i < cf->len; i += 4, j++) 862 writel(*(u32 *)(cf->data + i), &mb_regs->data[j]); 863 } 864 865 writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc); 866 867 can_put_echo_skb(skb, ndev, idx, 0); 868 869 /* Start transmission */ 870 writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id); 871 872 return NETDEV_TX_OK; 873 } 874 875 static const struct net_device_ops bxcan_netdev_ops = { 876 .ndo_open = bxcan_open, 877 .ndo_stop = bxcan_stop, 878 .ndo_start_xmit = bxcan_start_xmit, 879 .ndo_change_mtu = can_change_mtu, 880 }; 881 882 static const struct ethtool_ops bxcan_ethtool_ops = { 883 .get_ts_info = ethtool_op_get_ts_info, 884 }; 885 886 static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode) 887 { 888 int err; 889 890 switch (mode) { 891 case CAN_MODE_START: 892 err = bxcan_chip_start(ndev); 893 if (err) 894 return err; 895 896 netif_wake_queue(ndev); 
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* can_priv::do_get_berr_counter callback: read TEC/REC from ESR.  The
 * clock is enabled temporarily since the device may be closed.
 */
static int bxcan_get_berr_counter(const struct net_device *ndev,
				  struct can_berr_counter *bec)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 esr;
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err)
		return err;

	esr = readl(&regs->esr);
	bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
	bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
	clk_disable_unprepare(priv->clk);
	return 0;
}

/* Map resources (registers, shared filter block, clock, three irqs),
 * allocate and register the candev.
 */
static int bxcan_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct bxcan_priv *priv;
	struct clk *clk = NULL;
	void __iomem *regs;
	struct regmap *gcan;
	bool primary;
	int err, rx_irq, tx_irq, sce_irq;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs)) {
		dev_err(dev, "failed to get base address\n");
		return PTR_ERR(regs);
	}

	gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan");
	if (IS_ERR(gcan)) {
		dev_err(dev, "failed to get shared memory base address\n");
		return PTR_ERR(gcan);
	}

	primary = of_property_read_bool(np, "st,can-primary");
	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(clk);
	}

	rx_irq = platform_get_irq_byname(pdev, "rx0");
	if (rx_irq < 0) {
		dev_err(dev, "failed to get rx0 irq\n");
		return rx_irq;
	}

	tx_irq = platform_get_irq_byname(pdev, "tx");
	if (tx_irq < 0) {
		dev_err(dev, "failed to get tx irq\n");
		return tx_irq;
	}

	sce_irq = platform_get_irq_byname(pdev, "sce");
	if (sce_irq < 0) {
		dev_err(dev, "failed to get sce irq\n");
		return sce_irq;
	}

	ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM);
	if (!ndev) {
		dev_err(dev, "alloc_candev() failed\n");
		return -ENOMEM;
	}

	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	ndev->netdev_ops = &bxcan_netdev_ops;
	ndev->ethtool_ops = &bxcan_ethtool_ops;
	ndev->irq = rx_irq;
	ndev->flags |= IFF_ECHO;

	priv->dev = dev;
	priv->ndev = ndev;
	priv->regs = regs;
	priv->gcan = gcan;
	priv->clk = clk;
	priv->tx_irq = tx_irq;
	priv->sce_irq = sce_irq;
	priv->primary = primary;
	priv->can.clock.freq = clk_get_rate(clk);
	spin_lock_init(&priv->rmw_lock);
	priv->tx_head = 0;
	priv->tx_tail = 0;
	priv->can.bittiming_const = &bxcan_bittiming_const;
	priv->can.do_set_mode = bxcan_do_set_mode;
	priv->can.do_get_berr_counter = bxcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING;

	priv->offload.mailbox_read = bxcan_mailbox_read;
	err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT);
	if (err) {
		dev_err(dev, "failed to add FIFO rx_offload\n");
		goto out_free_candev;
	}

	err = register_candev(ndev);
	if (err) {
		dev_err(dev, "failed to register netdev\n");
		goto out_can_rx_offload_del;
	}

	dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq,
		 tx_irq, rx_irq, sce_irq);
	return 0;

out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
out_free_candev:
	free_candev(ndev);
	return err;
}

static int bxcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bxcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	clk_disable_unprepare(priv->clk);
	can_rx_offload_del(&priv->offload);
	free_candev(ndev);
	return 0;
}

/* System suspend: detach the interface and put the chip to sleep. */
static int __maybe_unused bxcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct bxcan_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	netif_stop_queue(ndev);
	netif_device_detach(ndev);

	bxcan_enter_sleep_mode(priv);
	priv->can.state = CAN_STATE_SLEEPING;
	clk_disable_unprepare(priv->clk);
	return 0;
}

/* System resume: wake the chip and reattach the interface. */
static int __maybe_unused bxcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct bxcan_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	clk_prepare_enable(priv->clk);
	bxcan_leave_sleep_mode(priv);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	netif_device_attach(ndev);
	netif_start_queue(ndev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume);

static const struct of_device_id bxcan_of_match[] = {
	{.compatible = "st,stm32f4-bxcan"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bxcan_of_match);

static struct platform_driver bxcan_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.pm = &bxcan_pm_ops,
		.of_match_table = bxcan_of_match,
	},
	.probe = bxcan_probe,
	.remove = bxcan_remove,
};

module_platform_driver(bxcan_driver);

MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver");
MODULE_LICENSE("GPL");