// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//	Dong Aisheng <b29396@freescale.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

/* Bosch M_CAN user manual can be obtained from:
 * https://github.com/linux-can/can-doc/tree/master/m_can
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
#include <linux/phy/phy.h>

#include "m_can.h"

/* register definitions */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* napi related */
#define M_CAN_NAPI_WEIGHT	64

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* Core Release Register (CREL) */
#define CREL_REL_MASK		GENMASK(31, 28)
#define CREL_STEP_MASK		GENMASK(27, 24)
#define CREL_SUBSTEP_MASK	GENMASK(23, 20)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_MASK		GENMASK(20, 16)
#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
#define DBTP_DSJW_MASK		GENMASK(3, 0)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_MASK		GENMASK(14, 8)
#define TDCR_TDCF_MASK		GENMASK(6, 0)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_DAR		BIT(6)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
/* for version 3.0.x */
#define CCCR_CMR_MASK		GENMASK(11, 10)
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		GENMASK(9, 8)
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* for version >=3.2.x */
#define CCCR_NISO		BIT(15)
/* for version >=3.3.x */
#define CCCR_WMM		BIT(11)
#define CCCR_UTSU		BIT(10)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_MASK		GENMASK(31, 25)
#define NBTP_NBRP_MASK		GENMASK(24, 16)
#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
#define NBTP_NTSEG2_MASK	GENMASK(6, 0)

/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK		GENMASK(19, 16)
#define TSCC_TSS_MASK		GENMASK(1, 0)
#define TSCC_TSS_DISABLE	0x0
#define TSCC_TSS_INTERNAL	0x1
#define TSCC_TSS_EXTERNAL	0x2

/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK		GENMASK(15, 0)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_MASK		GENMASK(14, 8)
#define ECR_TEC_MASK		GENMASK(7, 0)

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		GENMASK(2, 0)

/* Interrupt Register (IR) */
#define IR_ALL_INT		0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA			BIT(29)
#define IR_PED			BIT(28)
#define IR_PEA			BIT(27)

/* Bits for version 3.0.x */
#define IR_STE			BIT(31)
#define IR_FOE			BIT(30)
#define IR_ACKE			BIT(29)
#define IR_BE			BIT(28)
#define IR_CRCE			BIT(27)
#define IR_WDI			BIT(26)
#define IR_BO			BIT(25)
#define IR_EW			BIT(24)
#define IR_EP			BIT(23)
#define IR_ELO			BIT(22)
#define IR_BEU			BIT(21)
#define IR_BEC			BIT(20)
#define IR_DRX			BIT(19)
#define IR_TOO			BIT(18)
#define IR_MRAF			BIT(17)
#define IR_TSW			BIT(16)
#define IR_TEFL			BIT(15)
#define IR_TEFF			BIT(14)
#define IR_TEFW			BIT(13)
#define IR_TEFN			BIT(12)
#define IR_TFE			BIT(11)
#define IR_TCF			BIT(10)
#define IR_TC			BIT(9)
#define IR_HPM			BIT(8)
#define IR_RF1L			BIT(7)
#define IR_RF1F			BIT(6)
#define IR_RF1W			BIT(5)
#define IR_RF1N			BIT(4)
#define IR_RF0L			BIT(3)
#define IR_RF0F			BIT(2)
#define IR_RF0W			BIT(1)
#define IR_RF0N			BIT(0)
#define IR_ERR_STATE		(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X		(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X		(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
				 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
				 IR_RF0L)
#define IR_ERR_ALL_30X		(IR_ERR_STATE | IR_ERR_BUS_30X)

/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X		(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X		(IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
				 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
				 IR_RF0L)
#define IR_ERR_ALL_31X		(IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0		0x0
#define ILS_ALL_INT1		0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1		BIT(1)
#define ILE_EINT0		BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_MASK		GENMASK(30, 24)
#define RXFC_FS_MASK		GENMASK(22, 16)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL		BIT(25)
#define RXFS_FF			BIT(24)
#define RXFS_FPI_MASK		GENMASK(21, 16)
#define RXFS_FGI_MASK		GENMASK(13, 8)
#define RXFS_FFL_MASK		GENMASK(6, 0)
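/* Quick decode example for the status fields above (illustrative value,
 * not taken from hardware): with rxfs = 0x00030205,
 * FIELD_GET(RXFS_FFL_MASK, rxfs) = 5 frames are pending,
 * FIELD_GET(RXFS_FGI_MASK, rxfs) = 2 is the element to read next, and
 * FIELD_GET(RXFS_FPI_MASK, rxfs) = 3 is where the hardware stores the
 * next received frame. m_can_do_rx_poll() drains FIFO 0 based on the
 * fill level and get index.
 */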
/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define RXESC_RBDS_MASK		GENMASK(10, 8)
#define RXESC_F1DS_MASK		GENMASK(6, 4)
#define RXESC_F0DS_MASK		GENMASK(2, 0)
#define RXESC_64B		0x7

/* Tx Buffer Configuration (TXBC) */
#define TXBC_TFQS_MASK		GENMASK(29, 24)
#define TXBC_NDTB_MASK		GENMASK(21, 16)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_MASK	GENMASK(20, 16)
#define TXFQS_TFGI_MASK		GENMASK(12, 8)
#define TXFQS_TFFL_MASK		GENMASK(5, 0)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_MASK		GENMASK(2, 0)
#define TXESC_TBDS_64B		0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_MASK		GENMASK(21, 16)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_MASK		GENMASK(12, 8)
#define TXEFS_EFFL_MASK		GENMASK(5, 0)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_MASK		GENMASK(4, 0)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA		0x8

/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI		BIT(31)
#define RX_BUF_XTD		BIT(30)
#define RX_BUF_RTR		BIT(29)
/* R1 */
#define RX_BUF_ANMF		BIT(31)
#define RX_BUF_FDF		BIT(21)
#define RX_BUF_BRS		BIT(20)
#define RX_BUF_RXTS_MASK	GENMASK(15, 0)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI		BIT(31)
#define TX_BUF_XTD		BIT(30)
#define TX_BUF_RTR		BIT(29)
/* T1 */
#define TX_BUF_EFC		BIT(23)
#define TX_BUF_FDF		BIT(21)
#define TX_BUF_BRS		BIT(20)
#define TX_BUF_MM_MASK		GENMASK(31, 24)
#define TX_BUF_DLC_MASK		GENMASK(19, 16)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_MASK	GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK	GENMASK(15, 0)
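/* Example E1 decode (made-up value): txe = 0x05001234 carries
 * message marker FIELD_GET(TX_EVENT_MM_MASK, txe) = 5 and timestamp
 * FIELD_GET(TX_EVENT_TXTS_MASK, txe) = 0x1234. m_can_echo_tx_event()
 * uses the marker to match the echo skb queued at transmit time.
 */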
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
 * and we can save a (potentially slow) bus round trip by combining
 * reads and writes to them.
 */
struct id_and_dlc {
	u32 id;
	u32 dlc;
};

static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}

static int
m_can_fifo_read(struct m_can_classdev *cdev,
		u32 fgi, unsigned int offset, void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}

static int
m_can_fifo_write(struct m_can_classdev *cdev,
		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}

static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					  u32 fpi, u32 val)
{
	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}

static int
m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
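/* The helpers above address the Message RAM as
 *	region offset + element index * element size + word offset.
 * For instance (hypothetical layout), with the RXF0 region at offset
 * 0x400, get index fgi = 2 and word offset M_CAN_FIFO_DATA (0x8), the
 * data section of the element sits at 0x400 + 2 * 72 + 0x8 = 0x498.
 */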
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
	return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}

static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}

/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
 * width.
 */
static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
{
	u32 tscv;
	u32 tsc;

	tscv = m_can_read(cdev, M_CAN_TSCV);
	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);

	return (tsc << 16);
}
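/* Worked example: the hardware counter is 16 bits wide, so a raw TSCV.TSC
 * of 0x1234 becomes 0x12340000. Keeping the hardware bits in the upper
 * half means the values rx-offload compares for ordering span the full
 * 32-bit range.
 */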
static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		if (cdev->version > 30)
			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
					   m_can_read(cdev, M_CAN_TXFQS));

		can_free_echo_skb(cdev->net, putidx, NULL);
		cdev->tx_skb = NULL;
	}
}

/* For peripherals, pass skb to rx-offload, which will push skb from
 * napi. For non-peripherals, RX is done in napi already, so push
 * directly. timestamp is used to ensure good skb ordering in
 * rx-offload and is ignored for non-peripherals.
 */
static void m_can_receive_skb(struct m_can_classdev *cdev,
			      struct sk_buff *skb,
			      u32 timestamp)
{
	if (cdev->is_peripheral) {
		struct net_device_stats *stats = &cdev->net->stats;
		int err;

		err = can_rx_offload_queue_sorted(&cdev->offload, skb,
						  timestamp);
		if (err)
			stats->rx_fifo_errors++;
	} else {
		netif_receive_skb(skb);
	}
}

static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct id_and_dlc fifo_header;
	u32 fgi;
	u32 timestamp = 0;
	int err;

	/* calculate the fifo get index for where to read data */
	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
	if (err)
		goto out_fail;

	if (fifo_header.dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	if (fifo_header.dlc & RX_BUF_FDF)
		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
	else
		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);

	if (fifo_header.id & RX_BUF_XTD)
		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;

	if (fifo_header.id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (fifo_header.dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
				      cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_free_skb;

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	/* acknowledge rx fifo 0 */
	m_can_write(cdev, M_CAN_RXF0A, fgi);

	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);

	m_can_receive_skb(cdev, skb, timestamp);

	return 0;

out_free_skb:
	kfree_skb(skb);
out_fail:
	netdev_err(dev, "FIFO read returned %d\n", err);
	return err;
}

static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;
	int err;

	rxfs = m_can_read(cdev, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		err = m_can_read_fifo(dev, rxfs);
		if (err)
			return err;

		quota--;
		pkts++;
		rxfs = m_can_read(cdev, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;
	u32 timestamp = 0;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	cdev->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(cdev, M_CAN_ECR);
	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);

	return 0;
}

static int m_can_clk_start(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support == 0)
		return 0;

	return pm_runtime_resume_and_get(cdev->dev);
}

static void m_can_clk_stop(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support)
		pm_runtime_put_sync(cdev->dev);
}

static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = m_can_clk_start(cdev);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(cdev);

	return 0;
}
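/* For reference (per ISO 11898-1, not specific to this driver): a node
 * signals error warning once an error counter reaches 96, goes error
 * passive above 127, and bus-off once the transmit error counter exceeds
 * 255. The handlers below react to the corresponding PSR bits rather
 * than reading the counters directly.
 */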
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;
	u32 timestamp = 0;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cdev->can.can_stats.error_warning++;
		cdev->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cdev->can.can_stats.error_passive++;
		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cdev->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(cdev);
		cdev->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(cdev, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}

static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}

static inline bool is_lec_err(u32 psr)
{
	psr &= LEC_UNUSED;

	return psr && (psr != LEC_UNUSED);
}

static inline bool m_can_is_protocol_err(u32 irqstatus)
{
	return irqstatus & IR_ERR_LEC_31X;
}
static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* update tx error stats since there is a protocol error */
	stats->tx_errors++;

	/* update arbitration lost status */
	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
		netdev_dbg(dev, "Protocol error in Arbitration fail\n");
		cdev->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	if (unlikely(!skb)) {
		netdev_dbg(dev, "allocation of skb failed\n");
		return 0;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* handle protocol errors in arbitration phase */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    m_can_is_protocol_err(irqstatus))
		work_done += m_can_handle_protocol_error(dev, irqstatus);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}
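/* Process everything latched in IR: the value saved by the ISR is OR-ed
 * with a fresh read to pick up events that arrived after interrupts were
 * masked. State changes and bus errors are handled first, then RX FIFO 0
 * is drained within the remaining NAPI quota. Returns the amount of work
 * done, or a negative error code on a FIFO read failure.
 */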
static int m_can_rx_handler(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int rx_work_or_err;
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is
	 * generated.
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(cdev, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	psr = m_can_read(cdev, M_CAN_PSR);

	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N) {
		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
		if (rx_work_or_err < 0)
			return rx_work_or_err;

		work_done += rx_work_or_err;
	}
end:
	return work_done;
}

static int m_can_rx_peripheral(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0)
		m_can_enable_all_interrupts(cdev);

	return work_done;
}

static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, quota);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0 && work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(cdev);
	}

	return work_done;
}
/* Echo tx skb and update net stats. Peripherals use rx-offload for
 * echo. timestamp is used for peripherals to ensure correct ordering
 * by rx-offload, and is ignored for non-peripherals.
 */
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
				  unsigned int msg_mark,
				  u32 timestamp)
{
	struct net_device *dev = cdev->net;
	struct net_device_stats *stats = &dev->stats;

	if (cdev->is_peripheral)
		stats->tx_bytes +=
			can_rx_offload_get_echo_skb(&cdev->offload,
						    msg_mark,
						    timestamp,
						    NULL);
	else
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);

	stats->tx_packets++;
}

static int m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_classdev *cdev = netdev_priv(dev);

	/* read tx event fifo status */
	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		u32 txe, timestamp = 0;
		int err;

		/* retrieve get index */
		fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));

		/* get message marker, timestamp */
		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
		if (err) {
			netdev_err(dev, "TXE FIFO read returned %d\n", err);
			return err;
		}

		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);

		/* ack txe element */
		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
							  fgi));

		/* update stats */
		m_can_tx_update_stats(cdev, msg_mark, timestamp);
	}

	return 0;
}

static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 ir;

	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;
	ir = m_can_read(cdev, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(cdev, M_CAN_IR, ir);

	if (cdev->ops->clear_interrupts)
		cdev->ops->clear_interrupts(cdev);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		cdev->irqstatus = ir;
		m_can_disable_all_interrupts(cdev);
		if (!cdev->is_peripheral)
			napi_schedule(&cdev->napi);
		else if (m_can_rx_peripheral(dev) < 0)
			goto out_fail;
	}

	if (cdev->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt */
			u32 timestamp = 0;

			if (cdev->is_peripheral)
				timestamp = m_can_get_timestamp(cdev);
			m_can_tx_update_stats(cdev, 0, timestamp);

			can_led_event(dev, CAN_LED_EVENT_TX);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;

			can_led_event(dev, CAN_LED_EVENT_TX);
			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(cdev))
				netif_wake_queue(dev);
		}
	}

	if (cdev->is_peripheral)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;

out_fail:
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
}
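/* The tables below bound what the bit timing computation may pick. As a
 * worked example (assuming a 40 MHz CAN clock): brp = 1, prop_seg +
 * phase_seg1 = 31 and phase_seg2 = 8 give 1 + 31 + 8 = 40 time quanta
 * per bit, i.e. 40 MHz / (1 * 40) = 1 Mbit/s, with a sample point at
 * (1 + 31) / 40 = 80%.
 */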
static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	const struct can_bittiming *bt = &cdev->can.bittiming;
	const struct can_bittiming *dbt = &cdev->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
		FIELD_PREP(NBTP_NSJW_MASK, sjw) |
		FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
		FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
	m_can_write(cdev, M_CAN_NBTP, reg_btp);

	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (cdev->can.clock.freq / 1000) *
				ssp / dbt->bitrate;
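			/* Worked example (assumed numbers): with a 40 MHz
			 * CAN clock, a 4 Mbit/s data bitrate and ssp = 700
			 * (sample_point is in tenths of a percent, i.e.
			 * 70.0%), tdco = 40000 * 700 / 4000000 = 7 CAN
			 * clock periods, which is 70% of the 10 clock
			 * periods that make up one data bit.
			 */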
			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(cdev, M_CAN_TDCR,
				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
		}

		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
			FIELD_PREP(DBTP_DSJW_MASK, sjw) |
			FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
			FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);

		m_can_write(cdev, M_CAN_DBTP, reg_btp);
	}

	return 0;
}

/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *   - >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 * - configure timestamp generation
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(cdev, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(cdev, M_CAN_RXESC,
		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(cdev, M_CAN_GFC, 0x0);

	if (cdev->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
			    cdev->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(cdev, M_CAN_TXBC,
			    FIELD_PREP(TXBC_TFQS_MASK,
				       cdev->mcfg[MRAM_TXB].num) |
			    cdev->mcfg[MRAM_TXB].off);
	}

	/* support 64 bytes payload */
	m_can_write(cdev, M_CAN_TXESC,
		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));

	/* TX Event FIFO */
	if (cdev->version == 30) {
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
			    cdev->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK,
				       cdev->mcfg[MRAM_TXE].num) |
			    cdev->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(cdev, M_CAN_RXF0C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
		    cdev->mcfg[MRAM_RXF0].off);

	m_can_write(cdev, M_CAN_RXF1C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
		    cdev->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(cdev, M_CAN_CCCR);
	test = m_can_read(cdev, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (cdev->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO | CCCR_DAR);

		/* Only 3.2.x has NISO Bit implemented */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}
	/* Enable Monitoring (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Disable Auto Retransmission (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;

	/* Write config */
	m_can_write(cdev, M_CAN_CCCR, cccr);
	m_can_write(cdev, M_CAN_TEST, test);

	/* Enable interrupts */
	m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		if (cdev->version == 30)
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	else
		m_can_write(cdev, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* enable internal timestamp generation, with a prescaler of 16. The
	 * prescaler is applied to the nominal bit timing
	 */
	m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));

	m_can_config_endisable(cdev, false);

	if (cdev->ops->init)
		cdev->ops->init(cdev);
}

static void m_can_start(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(cdev);
}

static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_clean(dev);
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(struct m_can_classdev *cdev)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(cdev, M_CAN_CREL);
	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}
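/* Example: a CREL value of 0x32150320 decodes to rel = 3 and step = 2
 * (the substep and release-stamp fields are ignored here), so the
 * function reports version 32, i.e. an M_CAN 3.2.x core.
 */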
/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
	u32 cccr_reg, cccr_poll = 0;
	int niso_timeout = -ETIMEDOUT;
	int i;

	m_can_config_endisable(cdev, true);
	cccr_reg = m_can_read(cdev, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	for (i = 0; i <= 10; i++) {
		cccr_poll = m_can_read(cdev, M_CAN_CCCR);
		if (cccr_poll == cccr_reg) {
			niso_timeout = 0;
			break;
		}

		usleep_range(1, 5);
	}

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(cdev, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}

static int m_can_dev_setup(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;
	int m_can_version, err;

	m_can_version = m_can_check_core_release(cdev);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(cdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	if (!cdev->is_peripheral)
		netif_napi_add(dev, &cdev->napi,
			       m_can_poll, M_CAN_NAPI_WEIGHT);

	/* Shared properties of all M_CAN versions */
	cdev->version = m_can_version;
	cdev->can.do_set_mode = m_can_set_mode;
	cdev->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD |
		CAN_CTRLMODE_ONE_SHOT;

	/* Set properties depending on M_CAN version */
	switch (cdev->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_30X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;
		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
			 CAN_CTRLMODE_FD_NON_ISO : 0);
		break;
	default:
		dev_err(cdev->dev, "Unsupported version number: %2d",
			cdev->version);
		return -EINVAL;
	}

	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}

static void m_can_stop(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(cdev);

	/* Set init mode to disengage from the network */
	m_can_config_endisable(cdev, true);

	/* set the state as STOPPED */
	cdev->can.state = CAN_STATE_STOPPED;
}

static int m_can_close(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	netif_stop_queue(dev);

	if (!cdev->is_peripheral)
		napi_disable(&cdev->napi);

	m_can_stop(dev);
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);

	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
		can_rx_offload_disable(&cdev->offload);
	}

	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	phy_power_off(cdev->transceiver);

	return 0;
}

static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = cdev->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!cdev->can.echo_skb[next_idx];
}
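/* Write one frame from cdev->tx_skb to the controller. On v3.0.x a
 * single dedicated TX buffer is used and the queue stays stopped until
 * the transmission-complete interrupt; on v3.1.x and later the frame
 * goes into the TX FIFO and the put index doubles as the message marker
 * that the TX event FIFO later reports back for echo handling.
 */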
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
	struct net_device *dev = cdev->net;
	struct sk_buff *skb = cdev->tx_skb;
	struct id_and_dlc fifo_header;
	u32 cccr, fdflags;
	int err;
	int putidx;

	cdev->tx_skb = NULL;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		fifo_header.id = cf->can_id & CAN_EFF_MASK;
		fifo_header.id |= TX_BUF_XTD;
	} else {
		fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		fifo_header.id |= TX_BUF_RTR;

	if (cdev->version == 30) {
		netif_stop_queue(dev);

		fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;

		/* Write the frame ID, DLC, and payload to the FIFO element. */
		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(cdev, M_CAN_CCCR);
			cccr &= ~CCCR_CMR_MASK;
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD_BRS);
				else
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD);
			} else {
				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
			}
			m_can_write(cdev, M_CAN_CCCR, cccr);
		}
		m_can_write(cdev, M_CAN_TXBTIE, 0x1);

		can_put_echo_skb(skb, dev, 0, 0);

		m_can_write(cdev, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(cdev)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");

			if (cdev->is_peripheral) {
				kfree_skb(skb);
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			} else {
				return NETDEV_TX_BUSY;
			}
		}

		/* get put index for frame */
		putidx = FIELD_GET(TXFQS_TFQPI_MASK,
				   m_can_read(cdev, M_CAN_TXFQS));

		/* Construct DLC Field, with CAN-FD configuration.
		 * Use the put index of the fifo as the message marker,
		 * used in the TX interrupt for sending the correct echo frame.
		 */

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
			fdflags | TX_BUF_EFC;
		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;
		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx, 0);

		/* Enable TX FIFO element to start transfer */
		m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(cdev) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

out_fail:
	netdev_err(dev, "FIFO write returned %d\n", err);
	m_can_disable_all_interrupts(cdev);
	return NETDEV_TX_BUSY;
}

static void m_can_tx_work_queue(struct work_struct *ws)
{
	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
						   tx_work);

	m_can_tx_handler(cdev);
}

static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cdev->is_peripheral) {
		if (cdev->tx_skb) {
			netdev_err(dev, "hard_xmit called while tx busy\n");
			return NETDEV_TX_BUSY;
		}

		if (cdev->can.state == CAN_STATE_BUS_OFF) {
			m_can_clean(dev);
		} else {
			/* Need to stop the queue to avoid numerous requests
			 * from being sent. Suggested improvement is to create
			 * a queueing mechanism that will queue the skbs and
			 * process them in order.
			 */
			cdev->tx_skb = skb;
			netif_stop_queue(cdev->net);
			queue_work(cdev->tx_wq, &cdev->tx_work);
		}
	} else {
		cdev->tx_skb = skb;
		return m_can_tx_handler(cdev);
	}

	return NETDEV_TX_OK;
}

static int m_can_open(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = phy_power_on(cdev->transceiver);
	if (err)
		return err;

	err = m_can_clk_start(cdev);
	if (err)
		goto out_phy_power_off;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	if (cdev->is_peripheral)
		can_rx_offload_enable(&cdev->offload);

	/* register interrupt handler */
	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		cdev->tx_wq = alloc_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}

		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);

		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
					   dev->name, dev);
	} else {
		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
				  dev);
	}

	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);
out_wq_fail:
	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(cdev);
out_phy_power_off:
	phy_power_off(cdev->transceiver);
	return err;
}
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}

static void m_can_of_parse_mram(struct m_can_classdev *cdev,
				const u32 *mram_config_vals)
{
	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		FIELD_MAX(TXBC_NDTB_MASK);

	dev_dbg(cdev->dev,
		"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
		cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
		cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
		cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
		cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
		cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
		cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
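/* Worked example of the parsing above, using a typical "bosch,mram-cfg"
 * of <0x0 0 0 32 0 0 1 1> (start offset, then element counts): SIDF and
 * XIDF are empty at offset 0x0, RX FIFO 0 holds 32 elements starting at
 * 0x0, so the TX event FIFO lands at 0x0 + 32 * 72 = 0x900 with one
 * element, and the TX buffer follows at 0x900 + 1 * 8 = 0x908.
 */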
int m_can_init_ram(struct m_can_classdev *cdev)
{
	int end, i, start;
	int err = 0;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = cdev->mcfg[MRAM_SIDF].off;
	end = cdev->mcfg[MRAM_TXB].off +
		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;

	for (i = start; i < end; i += 4) {
		err = m_can_fifo_write_no_off(cdev, i, 0x0);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);

int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
	int ret = 0;

	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
	cdev->cclk = devm_clk_get(cdev->dev, "cclk");

	if (IS_ERR(cdev->cclk)) {
		dev_err(cdev->dev, "no clock found\n");
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);

struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
						int sizeof_priv)
{
	struct m_can_classdev *class_dev = NULL;
	u32 mram_config_vals[MRAM_CFG_LEN];
	struct net_device *net_dev;
	u32 tx_fifo_size;
	int ret;

	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
					     "bosch,mram-cfg",
					     mram_config_vals,
					     sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(dev, "Could not get Message RAM configuration.");
		goto out;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
	if (!net_dev) {
		dev_err(dev, "Failed to allocate CAN device");
		goto out;
	}

	class_dev = netdev_priv(net_dev);
	class_dev->net = net_dev;
	class_dev->dev = dev;
	SET_NETDEV_DEV(net_dev, dev);

	m_can_of_parse_mram(class_dev, mram_config_vals);
out:
	return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);

void m_can_class_free_dev(struct net_device *net)
{
	free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);
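/* Rough usage sketch for binding drivers (modeled on the in-tree glue
 * drivers; details vary and "my_ops"/"priv" are hypothetical names):
 * probe allocates the classdev, fills in the register accessors, then
 * hands over to the class core:
 *
 *	cdev = m_can_class_allocate_dev(dev, sizeof(*priv));
 *	cdev->ops = &my_ops;
 *	ret = m_can_class_get_clocks(cdev);
 *	ret = m_can_class_register(cdev);
 *
 * Teardown mirrors this with m_can_class_unregister() followed by
 * m_can_class_free_dev().
 */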
int m_can_class_register(struct m_can_classdev *cdev)
{
	int ret;

	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;
	}

	if (cdev->is_peripheral) {
		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
						M_CAN_NAPI_WEIGHT);
		if (ret)
			goto clk_disable;
	}

	ret = m_can_dev_setup(cdev);
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);
		goto rx_offload_del;
	}

	devm_can_led_init(cdev->net);

	of_can_transceiver(cdev->net);

	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, cdev->net->irq, cdev->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is
	 * opened
	 */
	m_can_clk_stop(cdev);

	return 0;

rx_offload_del:
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
clk_disable:
	m_can_clk_stop(cdev);

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);

void m_can_class_unregister(struct m_can_classdev *cdev)
{
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
	unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);

int m_can_class_suspend(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(cdev);
	}

	pinctrl_pm_select_sleep_state(dev);

	cdev->can.state = CAN_STATE_SLEEPING;

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);

int m_can_class_resume(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	pinctrl_pm_select_default_state(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;

		m_can_init_ram(cdev);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");