// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//	Dong Aisheng <b29396@freescale.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

/* Bosch M_CAN user manual can be obtained from:
 * https://github.com/linux-can/can-doc/tree/master/m_can
 */

#include <linux/bitfield.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
#include <linux/phy/phy.h>

#include "m_can.h"

/* registers definition */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* Core Release Register (CREL) */
#define CREL_REL_MASK		GENMASK(31, 28)
#define CREL_STEP_MASK		GENMASK(27, 24)
#define CREL_SUBSTEP_MASK	GENMASK(23, 20)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_MASK		GENMASK(20, 16)
#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
#define DBTP_DSJW_MASK		GENMASK(3, 0)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_MASK		GENMASK(14, 8)
#define TDCR_TDCF_MASK		GENMASK(6, 0)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_DAR		BIT(6)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
/* for version 3.0.x */
#define CCCR_CMR_MASK		GENMASK(11, 10)
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		GENMASK(9, 8)
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* for version >=3.2.x */
#define CCCR_NISO		BIT(15)
/* for version >=3.3.x */
#define CCCR_WMM		BIT(11)
#define CCCR_UTSU		BIT(10)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_MASK		GENMASK(31, 25)
#define NBTP_NBRP_MASK		GENMASK(24, 16)
#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
#define NBTP_NTSEG2_MASK	GENMASK(6, 0)

/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK		GENMASK(19, 16)
#define TSCC_TSS_MASK		GENMASK(1, 0)
#define TSCC_TSS_DISABLE	0x0
#define TSCC_TSS_INTERNAL	0x1
#define TSCC_TSS_EXTERNAL	0x2

/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK		GENMASK(15, 0)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_MASK		GENMASK(14, 8)
#define ECR_TEC_MASK		GENMASK(7, 0)

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		GENMASK(2, 0)
#define PSR_DLEC_MASK		GENMASK(10, 8)

/* Interrupt Register (IR) */
#define IR_ALL_INT		0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA			BIT(29)
#define IR_PED			BIT(28)
#define IR_PEA			BIT(27)

/* Bits for version 3.0.x */
#define IR_STE			BIT(31)
#define IR_FOE			BIT(30)
#define IR_ACKE			BIT(29)
#define IR_BE			BIT(28)
#define IR_CRCE			BIT(27)
#define IR_WDI			BIT(26)
#define IR_BO			BIT(25)
#define IR_EW			BIT(24)
#define IR_EP			BIT(23)
#define IR_ELO			BIT(22)
#define IR_BEU			BIT(21)
#define IR_BEC			BIT(20)
#define IR_DRX			BIT(19)
#define IR_TOO			BIT(18)
#define IR_MRAF			BIT(17)
#define IR_TSW			BIT(16)
#define IR_TEFL			BIT(15)
#define IR_TEFF			BIT(14)
#define IR_TEFW			BIT(13)
#define IR_TEFN			BIT(12)
#define IR_TFE			BIT(11)
#define IR_TCF			BIT(10)
#define IR_TC			BIT(9)
#define IR_HPM			BIT(8)
#define IR_RF1L			BIT(7)
#define IR_RF1F			BIT(6)
#define IR_RF1W			BIT(5)
#define IR_RF1N			BIT(4)
#define IR_RF0L			BIT(3)
#define IR_RF0F			BIT(2)
#define IR_RF0W			BIT(1)
#define IR_RF0N			BIT(0)
#define IR_ERR_STATE		(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X		(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X		(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
				 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
				 IR_RF0L)
#define IR_ERR_ALL_30X		(IR_ERR_STATE | IR_ERR_BUS_30X)

/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X		(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X		(IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
				 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
				 IR_RF0L)
#define IR_ERR_ALL_31X		(IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0		0x0
#define ILS_ALL_INT1		0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1		BIT(1)
#define ILE_EINT0		BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_MASK		GENMASK(30, 24)
#define RXFC_FS_MASK		GENMASK(22, 16)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL		BIT(25)
#define RXFS_FF			BIT(24)
#define RXFS_FPI_MASK		GENMASK(21, 16)
#define RXFS_FGI_MASK		GENMASK(13, 8)
#define RXFS_FFL_MASK		GENMASK(6, 0)

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define RXESC_RBDS_MASK		GENMASK(10, 8)
#define RXESC_F1DS_MASK		GENMASK(6, 4)
#define RXESC_F0DS_MASK		GENMASK(2, 0)
#define RXESC_64B		0x7

/* Tx Buffer Configuration (TXBC) */
#define TXBC_TFQS_MASK		GENMASK(29, 24)
#define TXBC_NDTB_MASK		GENMASK(21, 16)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_MASK	GENMASK(20, 16)
#define TXFQS_TFGI_MASK		GENMASK(12, 8)
#define TXFQS_TFFL_MASK		GENMASK(5, 0)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_MASK		GENMASK(2, 0)
#define TXESC_TBDS_64B		0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_MASK		GENMASK(21, 16)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_MASK		GENMASK(12, 8)
#define TXEFS_EFFL_MASK		GENMASK(5, 0)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_MASK		GENMASK(4, 0)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

/* Message RAM Elements: byte offsets within one FIFO/buffer element */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA		0x8

/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI		BIT(31)
#define RX_BUF_XTD		BIT(30)
#define RX_BUF_RTR		BIT(29)
/* R1 */
#define RX_BUF_ANMF		BIT(31)
#define RX_BUF_FDF		BIT(21)
#define RX_BUF_BRS		BIT(20)
#define RX_BUF_RXTS_MASK	GENMASK(15, 0)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI		BIT(31)
#define TX_BUF_XTD		BIT(30)
#define TX_BUF_RTR		BIT(29)
/* T1 */
#define TX_BUF_EFC		BIT(23)
#define TX_BUF_FDF		BIT(21)
#define TX_BUF_BRS		BIT(20)
#define TX_BUF_MM_MASK		GENMASK(31, 24)
#define TX_BUF_DLC_MASK		GENMASK(19, 16)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_MASK	GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK	GENMASK(15, 0)

/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
 * and we can save a (potentially slow) bus round trip by combining
 * reads and writes to them.
 */
struct id_and_dlc {
	u32 id;
	u32 dlc;
};

/* Read one M_CAN core register through the device's ops table. */
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

/* Write one M_CAN core register through the device's ops table. */
static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}

/* Read @val_count 32-bit words from Rx FIFO 0 element @fgi, starting at
 * byte @offset within the element. Returns 0 on success or a negative
 * error code from the underlying read_fifo op.
 */
static int
m_can_fifo_read(struct m_can_classdev *cdev,
		u32 fgi, unsigned int offset, void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
		offset;

	/* Nothing to transfer (e.g. a zero-length data section) */
	if (val_count == 0)
		return 0;

	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}

/* Write @val_count 32-bit words to Tx buffer element @fpi, starting at
 * byte @offset within the element. Returns 0 on success or a negative
 * error code from the underlying write_fifo op.
 */
static int
m_can_fifo_write(struct m_can_classdev *cdev,
		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}

/* Write a single word to message RAM.
 * NOTE: unlike m_can_fifo_write(), @fpi here is used as a raw message-RAM
 * offset and is NOT scaled by the element size or MRAM_TXB base.
 */
static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					  u32 fpi, u32 val)
{
	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}

/* Read one word from Tx Event FIFO element @fgi at byte @offset. */
static int
m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}

/* True when the Tx FIFO/Queue reports full (TXFQS.TFQF set). */
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
	return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}

/* Enter (@enable = true) or leave configuration mode by toggling
 * CCCR.INIT/CCCR.CCE, then poll until the hardware confirms the new
 * state (bounded by a ~10us timeout).
 */
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}

/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
 * width.
 */
static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
{
	u32 tscv;
	u32 tsc;

	tscv = m_can_read(cdev, M_CAN_TSCV);
	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);

	return (tsc << 16);
}

/* Drop a pending tx skb after a failed transmission attempt: count it
 * as a tx error and release the echo skb slot it occupied.
 */
static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		/* version 3.0.x only uses echo slot 0; later cores use the
		 * current Tx FIFO put index as the slot number
		 */
		if (cdev->version > 30)
			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
					   m_can_read(cdev, M_CAN_TXFQS));

		can_free_echo_skb(cdev->net, putidx, NULL);
		cdev->tx_skb = NULL;
	}
}
/* For peripherals, pass skb to rx-offload, which will push skb from
 * napi. For non-peripherals, RX is done in napi already, so push
 * directly. timestamp is used to ensure good skb ordering in
 * rx-offload and is ignored for non-peripherals.
 */
static void m_can_receive_skb(struct m_can_classdev *cdev,
			      struct sk_buff *skb,
			      u32 timestamp)
{
	if (cdev->is_peripheral) {
		struct net_device_stats *stats = &cdev->net->stats;
		int err;

		err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
						     timestamp);
		if (err)
			stats->rx_fifo_errors++;
	} else {
		netif_receive_skb(skb);
	}
}

/* Read one frame out of Rx FIFO 0 and hand it to the stack.
 * @rxfs: current RXF0S register value (provides the get index).
 * Returns 0 on success (including skb allocation failure, which only
 * drops the frame) or a negative error on FIFO read failure.
 */
static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct id_and_dlc fifo_header;
	u32 fgi;
	u32 timestamp = 0;
	int err;

	/* calculate the fifo get index for where to read data */
	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
	/* combined read of the R0 (id) and R1 (dlc) element words */
	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
	if (err)
		goto out_fail;

	if (fifo_header.dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	/* DLC field lives in bits 19:16 of R1 */
	if (fifo_header.dlc & RX_BUF_FDF)
		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
	else
		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);

	if (fifo_header.id & RX_BUF_XTD)
		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;

	if (fifo_header.id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	/* RTR frames carry no data; only classic CAN frames can be RTR */
	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (fifo_header.dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
				      cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_free_skb;

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	/* acknowledge rx fifo 0 */
	m_can_write(cdev, M_CAN_RXF0A, fgi);

	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;

	m_can_receive_skb(cdev, skb, timestamp);

	return 0;

out_free_skb:
	kfree_skb(skb);
out_fail:
	netdev_err(dev, "FIFO read returned %d\n", err);
	return err;
}

/* Drain up to @quota frames from Rx FIFO 0. Returns the number of
 * frames processed, or a negative error from m_can_read_fifo().
 */
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;
	int err;

	rxfs = m_can_read(cdev, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		err = m_can_read_fifo(dev, rxfs);
		if (err)
			return err;

		quota--;
		pkts++;
		rxfs = m_can_read(cdev, M_CAN_RXF0S);
	}

	return pkts;
}

/* Report an Rx FIFO 0 overflow as an error frame. Returns the number
 * of error frames queued (0 or 1).
 */
static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;
	u32 timestamp = 0;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

/* Translate a PSR "last error code" into an error frame for the stack.
 * Returns the number of error frames queued (0 or 1).
 */
static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	cdev->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

/* Fill @bec from the ECR register; assumes the device clock is running. */
static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(cdev, M_CAN_ECR);
	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);

	return 0;
}

/* Resume the device via runtime PM when clock management is supported. */
static int m_can_clk_start(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support == 0)
		return 0;

	return pm_runtime_resume_and_get(cdev->dev);
}

/* Counterpart of m_can_clk_start(): drop the runtime PM reference. */
static void m_can_clk_stop(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support)
		pm_runtime_put_sync(cdev->dev);
}

/* can_priv::do_get_berr_counter callback: read error counters with the
 * device clock temporarily enabled.
 */
static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = m_can_clk_start(cdev);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(cdev);

	return 0;
}

/* Record a transition to @new_state (warning/passive/bus-off) and queue
 * a matching error frame. Returns the number of error frames queued.
 */
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;
	u32 timestamp = 0;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cdev->can.can_stats.error_warning++;
		cdev->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cdev->can.can_stats.error_passive++;
		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cdev->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(cdev);
		cdev->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
		ecr = m_can_read(cdev, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

/* Check PSR state flags and emit a state-change event for each state the
 * controller newly entered. Returns the number of events generated.
 */
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}

/* Log remaining hardware error interrupts that need no recovery action. */
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}

/* True when @lec encodes an actual error (not "no error"/"no change"). */
static inline bool is_lec_err(u8 lec)
{
	return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}

/* True when any >=3.1.x protocol error interrupt (PED/PEA) is pending. */
static inline bool m_can_is_protocol_err(u32 irqstatus)
{
	return irqstatus & IR_ERR_LEC_31X;
}

/* Handle protocol error interrupts (arbitration loss) and forward an
 * error frame to the stack. Returns the number of frames queued.
 */
static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* update tx error stats since there is protocol error */
	stats->tx_errors++;

	/* update arbitration lost status */
	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
		netdev_dbg(dev, "Protocol error in Arbitration fail\n");
		cdev->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	if (unlikely(!skb)) {
		netdev_dbg(dev, "allocation of skb failed\n");
		return 0;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

/* Dispatch all bus-error conditions: lost messages, LEC/DLEC codes,
 * protocol errors, plus log-only errors. Returns work done.
 */
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
		u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
		u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);

		if (is_lec_err(lec)) {
			netdev_dbg(dev, "Arbitration phase error detected\n");
			work_done += m_can_handle_lec_err(dev, lec);
		}

		if (is_lec_err(dlec)) {
			netdev_dbg(dev, "Data phase error detected\n");
			work_done += m_can_handle_lec_err(dev, dlec);
		}
	}

	/* handle protocol errors in arbitration phase */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    m_can_is_protocol_err(irqstatus))
		work_done += m_can_handle_protocol_error(dev, irqstatus);

	/* other unproccessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}

/* Core RX/error processing shared by NAPI poll and peripheral path.
 * Returns work done (bounded by @quota) or a negative error.
 */
static int m_can_rx_handler(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int rx_work_or_err;
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(cdev, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	psr = m_can_read(cdev, M_CAN_PSR);

	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N) {
		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
		if (rx_work_or_err < 0)
			return rx_work_or_err;

		work_done += rx_work_or_err;
	}
end:
	return work_done;
}

/* RX processing for peripheral (e.g. SPI-attached) devices, called from
 * threaded irq context instead of NAPI.
 */
static int m_can_rx_peripheral(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0)
		m_can_enable_all_interrupts(cdev);

	return work_done;
}

/* NAPI poll callback for memory-mapped devices. */
static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, quota);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0 && work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(cdev);
	}

	return work_done;
}
/* Echo tx skb and update net stats. Peripherals use rx-offload for
 * echo. timestamp is used for peripherals to ensure correct ordering
 * by rx-offload, and is ignored for non-peripherals.
 */
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
				  unsigned int msg_mark,
				  u32 timestamp)
{
	struct net_device *dev = cdev->net;
	struct net_device_stats *stats = &dev->stats;

	if (cdev->is_peripheral)
		stats->tx_bytes +=
			can_rx_offload_get_echo_skb(&cdev->offload,
						    msg_mark,
						    timestamp,
						    NULL);
	else
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);

	stats->tx_packets++;
}

/* Process all pending Tx Event FIFO elements: for each one, read the
 * message marker and timestamp, acknowledge the element, and complete
 * the matching echo skb. Returns 0 or a negative FIFO read error.
 */
static int m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_classdev *cdev = netdev_priv(dev);

	/* read tx event fifo status */
	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		u32 txe, timestamp = 0;
		int err;

		/* retrieve get index */
		fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));

		/* get message marker, timestamp */
		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
		if (err) {
			netdev_err(dev, "TXE FIFO read returned %d\n", err);
			return err;
		}

		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;

		/* ack txe element */
		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
							  fgi));

		/* update stats */
		m_can_tx_update_stats(cdev, msg_mark, timestamp);
	}

	return 0;
}

/* Main (threaded for peripherals) interrupt handler: acknowledge all
 * pending interrupts, run RX/error processing, and complete TX echoes.
 */
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 ir;

	/* the device clock may be gated; treat the interrupt as spurious */
	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;
	ir = m_can_read(cdev, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(cdev, M_CAN_IR, ir);

	if (cdev->ops->clear_interrupts)
		cdev->ops->clear_interrupts(cdev);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		cdev->irqstatus = ir;
		m_can_disable_all_interrupts(cdev);
		if (!cdev->is_peripheral)
			napi_schedule(&cdev->napi);
		else if (m_can_rx_peripheral(dev) < 0)
			goto out_fail;
	}

	if (cdev->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt*/
			u32 timestamp = 0;

			if (cdev->is_peripheral)
				timestamp = m_can_get_timestamp(cdev);
			m_can_tx_update_stats(cdev, 0, timestamp);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;

			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(cdev))
				netif_wake_queue(dev);
		}
	}

	if (cdev->is_peripheral)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;

out_fail:
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
}

static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

/* Program NBTP (nominal) and, for CAN FD, DBTP (data) bit timing from
 * the values already validated by the CAN core, including transmitter
 * delay compensation for data bitrates above 2.5 MBit/s.
 */
static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	const struct can_bittiming *bt = &cdev->can.bittiming;
	const struct can_bittiming *dbt = &cdev->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	/* register fields hold the value minus one */
	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
		  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
		  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
		  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
	m_can_write(cdev, M_CAN_NBTP, reg_btp);

	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (cdev->can.clock.freq / 1000) *
				ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(cdev, M_CAN_TDCR,
				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
		}

		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
			   FIELD_PREP(DBTP_DSJW_MASK, sjw) |
			   FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
			   FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);

		m_can_write(cdev, M_CAN_DBTP, reg_btp);
	}

	return 0;
}

/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *   - >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 * - configure timestamp generation
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(cdev, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(cdev, M_CAN_RXESC,
		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(cdev, M_CAN_GFC, 0x0);

	if (cdev->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
			    cdev->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(cdev, M_CAN_TXBC,
			    FIELD_PREP(TXBC_TFQS_MASK,
				       cdev->mcfg[MRAM_TXB].num) |
			    cdev->mcfg[MRAM_TXB].off);
	}

	/* support 64 bytes payload */
	m_can_write(cdev, M_CAN_TXESC,
		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));

	/* TX Event FIFO */
	if (cdev->version == 30) {
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
			    cdev->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK,
				       cdev->mcfg[MRAM_TXE].num) |
			    cdev->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(cdev, M_CAN_RXF0C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
		    cdev->mcfg[MRAM_RXF0].off);

	m_can_write(cdev, M_CAN_RXF1C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
		    cdev->mcfg[MRAM_RXF1].off);

	/* read-modify-write CCCR/TEST to preserve bits not managed here */
	cccr = m_can_read(cdev, M_CAN_CCCR);
	test = m_can_read(cdev, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (cdev->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO | CCCR_DAR);

		/* Only 3.2.x has NISO Bit implemented */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Disable Auto Retransmission (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;

	/* Write config */
	m_can_write(cdev, M_CAN_CCCR, cccr);
	m_can_write(cdev, M_CAN_TEST, test);

	/* Enable interrupts: clear all pending flags first, then mask out the
	 * last-error-code interrupts unless bus error reporting was requested
	 */
	m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		if (cdev->version == 30)
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	else
		m_can_write(cdev, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* enable internal timestamp generation, with a prescaler of 16. The
	 * prescaler is applied to the nominal bit timing
	 */
	m_can_write(cdev, M_CAN_TSCC,
		    FIELD_PREP(TSCC_TCP_MASK, 0xf) |
		    FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));

	/* leave init/configuration mode */
	m_can_config_endisable(cdev, false);

	/* device-glue specific initialization hook */
	if (cdev->ops->init)
		cdev->ops->init(cdev);
}

/* Bring the controller into operation: configure the chip, mark the CAN
 * state as active and unmask the interrupt line(s).
 */
static void m_can_start(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(cdev);
}

/* CAN core callback used for manual restart; only CAN_MODE_START is
 * supported (e.g. "ip link set canX type can restart").
 */
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_clean(dev);
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(struct m_can_classdev *cdev)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(cdev, M_CAN_CREL);
	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}

/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
	u32 cccr_reg, cccr_poll = 0;
	int niso_timeout = -ETIMEDOUT;
	int i;

	/* NISO is a protected bit: enter configuration mode to try it */
	m_can_config_endisable(cdev, true);
	cccr_reg = m_can_read(cdev, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	/* poll up to 11 times for the bit to read back as written */
	for (i = 0; i <= 10; i++) {
		cccr_poll = m_can_read(cdev, M_CAN_CCCR);
		if (cccr_poll == cccr_reg) {
			niso_timeout = 0;
			break;
		}

		usleep_range(1, 5);
	}

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(cdev, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}

/* Probe-time setup: detect the IP core version and install the
 * version-dependent bit timing constants and supported control modes.
 */
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;
	int m_can_version, err;

	m_can_version = m_can_check_core_release(cdev);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(cdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	if (!cdev->is_peripheral)
		netif_napi_add(dev, &cdev->napi, m_can_poll);

	/* Shared properties of all M_CAN versions */
	cdev->version = m_can_version;
	cdev->can.do_set_mode = m_can_set_mode;
	cdev->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD |
		CAN_CTRLMODE_ONE_SHOT;

	/* Set properties depending on M_CAN version */
	switch (cdev->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;

		/* NISO is switchable only if the hardware implements it */
		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
			 CAN_CTRLMODE_FD_NON_ISO : 0);
		break;
	default:
		dev_err(cdev->dev, "Unsupported version number: %2d",
			cdev->version);
		return -EINVAL;
	}

	/* device-glue specific initialization hook */
	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}

/* Take the controller off the bus: mask interrupts, enter init mode and
 * mark the device as stopped.
 */
static void m_can_stop(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(cdev);

	/* Set init mode to disengage from the network */
	m_can_config_endisable(cdev, true);

	/* set the state as STOPPED */
	cdev->can.state = CAN_STATE_STOPPED;
}

/* ndo_stop callback: tear down everything m_can_open() set up, in
 * reverse order.
 */
static int m_can_close(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	netif_stop_queue(dev);

	if (!cdev->is_peripheral)
		napi_disable(&cdev->napi);

	m_can_stop(dev);
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);

	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
	}

	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);

	close_candev(dev);

	phy_power_off(cdev->transceiver);

	return 0;
}

/* Return true if the echo skb slot following @putidx is still occupied,
 * i.e. its TX completion has not been processed yet.
 */
static int
m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = cdev->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!cdev->can.echo_skb[next_idx];
}

/* Write cdev->tx_skb into the hardware TX buffer (v3.0.x) or TX FIFO
 * (>= v3.1.x) and request transmission.  Runs in softirq context for
 * memory-mapped devices and from the tx workqueue for peripherals.
 */
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
	struct net_device *dev = cdev->net;
	struct sk_buff *skb = cdev->tx_skb;
	struct id_and_dlc fifo_header;
	u32 cccr, fdflags;
	int err;
	int putidx;

	cdev->tx_skb = NULL;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		fifo_header.id = cf->can_id & CAN_EFF_MASK;
		fifo_header.id |= TX_BUF_XTD;
	} else {
		/* standard 11-bit ID sits in bits 28..18 of the element */
		fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		fifo_header.id |= TX_BUF_RTR;

	if (cdev->version == 30) {
		/* single TX buffer: stop the queue until completion */
		netif_stop_queue(dev);

		fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;

		/* Write the frame ID, DLC, and payload to the FIFO element.
		 */
		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* v3.0.x selects the frame format globally via CCCR.CMR
		 * rather than per FIFO element
		 */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(cdev, M_CAN_CCCR);
			cccr &= ~CCCR_CMR_MASK;
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD_BRS);
				else
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD);
			} else {
				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
			}
			m_can_write(cdev, M_CAN_CCCR, cccr);
		}
		m_can_write(cdev, M_CAN_TXBTIE, 0x1);

		can_put_echo_skb(skb, dev, 0, 0);

		/* request transmission of buffer 0 */
		m_can_write(cdev, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(cdev)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");

			if (cdev->is_peripheral) {
				kfree_skb(skb);
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			} else {
				return NETDEV_TX_BUSY;
			}
		}

		/* get put index for frame */
		putidx = FIELD_GET(TXFQS_TFQPI_MASK,
				   m_can_read(cdev, M_CAN_TXFQS));

		/* Construct DLC Field, with CAN-FD configuration.
		 * Use the put index of the fifo as the message marker,
		 * used in the TX interrupt for sending the correct echo frame.
		 */

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
			fdflags | TX_BUF_EFC;
		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx, 0);

		/* Enable TX FIFO element to start transfer */
		m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(cdev) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

out_fail:
	netdev_err(dev, "FIFO write returned %d\n", err);
	m_can_disable_all_interrupts(cdev);
	return NETDEV_TX_BUSY;
}

/* Workqueue handler for peripheral devices: performs the TX in a context
 * that may sleep (e.g. for SPI transfers).
 */
static void m_can_tx_work_queue(struct work_struct *ws)
{
	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
						   tx_work);

	m_can_tx_handler(cdev);
}

/* ndo_start_xmit callback: transmit directly for memory-mapped devices,
 * or defer to the tx workqueue for peripheral (e.g. SPI attached) ones.
 */
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cdev->is_peripheral) {
		/* only one in-flight skb is supported for peripherals */
		if (cdev->tx_skb) {
			netdev_err(dev, "hard_xmit called while tx busy\n");
			return NETDEV_TX_BUSY;
		}

		if (cdev->can.state == CAN_STATE_BUS_OFF) {
			m_can_clean(dev);
		} else {
			/* Need to stop the queue to avoid numerous requests
			 * from being sent.  Suggested improvement is to create
			 * a queueing mechanism that will queue the skbs and
			 * process them in order.
			 */
			cdev->tx_skb = skb;
			netif_stop_queue(cdev->net);
			queue_work(cdev->tx_wq, &cdev->tx_work);
		}
	} else {
		cdev->tx_skb = skb;
		return m_can_tx_handler(cdev);
	}

	return NETDEV_TX_OK;
}

/* ndo_open callback: power up the transceiver and clocks, register the
 * interrupt handler (threaded for peripherals) and start the controller.
 */
static int m_can_open(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = phy_power_on(cdev->transceiver);
	if (err)
		return err;

	err = m_can_clk_start(cdev);
	if (err)
		goto out_phy_power_off;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	if (cdev->is_peripheral)
		can_rx_offload_enable(&cdev->offload);

	/* register interrupt handler */
	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		cdev->tx_wq = alloc_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}

		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);

		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
					   dev->name, dev);
	} else {
		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
				  dev);
	}

	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);
out_wq_fail:
	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(cdev);
out_phy_power_off:
	phy_power_off(cdev->transceiver);
	return err;
}

static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops m_can_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

/* Install the netdev/ethtool operations and register with the CAN core */
static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;
	dev->ethtool_ops = &m_can_ethtool_ops;

	return register_candev(dev);
}

/* Lay out the Message RAM regions (filters, RX FIFOs/buffers, TX event
 * FIFO and TX buffers) back to back from the offsets/counts given in the
 * "bosch,mram-cfg" property.  FIFO sizes are clamped to the hardware
 * field widths.
 */
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
				const u32 *mram_config_vals)
{
	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		FIELD_MAX(TXBC_NDTB_MASK);

	dev_dbg(cdev->dev,
		"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
		cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
		cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
		cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
		cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
		cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
		cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}

int m_can_init_ram(struct m_can_classdev *cdev)
{
	int end, i, start;
	int err = 0;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = cdev->mcfg[MRAM_SIDF].off;
	end = cdev->mcfg[MRAM_TXB].off +
		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;

	for (i = start; i < end; i += 4)
		{
		err = m_can_fifo_write_no_off(cdev, i, 0x0);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);

/* Look up the host and CAN clocks; only "cclk" is mandatory */
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
	int ret = 0;

	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
	cdev->cclk = devm_clk_get(cdev->dev, "cclk");

	if (IS_ERR(cdev->cclk)) {
		dev_err(cdev->dev, "no clock found\n");
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);

/* Allocate the CAN netdev (with one echo slot per TX FIFO element) and
 * parse the Message RAM configuration.  Returns NULL on failure.
 */
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
						int sizeof_priv)
{
	struct m_can_classdev *class_dev = NULL;
	u32 mram_config_vals[MRAM_CFG_LEN];
	struct net_device *net_dev;
	u32 tx_fifo_size;
	int ret;

	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
					     "bosch,mram-cfg",
					     mram_config_vals,
					     sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(dev, "Could not get Message RAM configuration.");
		goto out;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
	if (!net_dev) {
		dev_err(dev, "Failed to allocate CAN device");
		goto out;
	}

	class_dev = netdev_priv(net_dev);
	class_dev->net = net_dev;
	class_dev->dev = dev;
	SET_NETDEV_DEV(net_dev, dev);

	m_can_of_parse_mram(class_dev, mram_config_vals);
out:
	return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);

void m_can_class_free_dev(struct net_device *net)
{
	free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);

/* Final probe step for all device glues: detect the core, register the
 * netdev, then park the clocks until the interface is opened.
 */
int m_can_class_register(struct m_can_classdev *cdev)
{
	int ret;

	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;
	}

	if (cdev->is_peripheral) {
		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
						NAPI_POLL_WEIGHT);
		if (ret)
			goto clk_disable;
	}

	ret = m_can_dev_setup(cdev);
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);
		goto rx_offload_del;
	}

	of_can_transceiver(cdev->net);

	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, cdev->net->irq, cdev->version);

	/* Probe finished
	 * Stop clocks.  They will be reactivated once the M_CAN device is opened
	 */
	m_can_clk_stop(cdev);

	return 0;

rx_offload_del:
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
clk_disable:
	m_can_clk_stop(cdev);

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);

void m_can_class_unregister(struct m_can_classdev *cdev)
{
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
	unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);

/* Power-management suspend hook: stop the controller and clocks if the
 * interface is up, then switch the pins to their sleep state.
 */
int m_can_class_suspend(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(cdev);
	}

	pinctrl_pm_select_sleep_state(dev);

	cdev->can.state = CAN_STATE_SLEEPING;

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);

/* Power-management resume hook: restore pins, re-init the Message RAM
 * (its contents may be lost across suspend) and restart the controller
 * if the interface was up.
 */
int m_can_class_resume(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	pinctrl_pm_select_default_state(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;

		m_can_init_ram(cdev);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");