1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 // 5 // Copyright (c) 2019, 2020, 2021 Pengutronix, 6 // Marc Kleine-Budde <kernel@pengutronix.de> 7 // 8 // Based on: 9 // 10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface 11 // 12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> 13 // 14 15 #include <linux/bitfield.h> 16 #include <linux/clk.h> 17 #include <linux/device.h> 18 #include <linux/module.h> 19 #include <linux/of.h> 20 #include <linux/of_device.h> 21 #include <linux/pm_runtime.h> 22 23 #include <asm/unaligned.h> 24 25 #include "mcp251xfd.h" 26 27 #define DEVICE_NAME "mcp251xfd" 28 29 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = { 30 .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG | 31 MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | 32 MCP251XFD_QUIRK_ECC, 33 .model = MCP251XFD_MODEL_MCP2517FD, 34 }; 35 36 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { 37 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | 38 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, 39 .model = MCP251XFD_MODEL_MCP2518FD, 40 }; 41 42 /* Autodetect model, start with CRC enabled. 
*/ 43 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { 44 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | 45 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, 46 .model = MCP251XFD_MODEL_MCP251XFD, 47 }; 48 49 static const struct can_bittiming_const mcp251xfd_bittiming_const = { 50 .name = DEVICE_NAME, 51 .tseg1_min = 2, 52 .tseg1_max = 256, 53 .tseg2_min = 1, 54 .tseg2_max = 128, 55 .sjw_max = 128, 56 .brp_min = 1, 57 .brp_max = 256, 58 .brp_inc = 1, 59 }; 60 61 static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { 62 .name = DEVICE_NAME, 63 .tseg1_min = 1, 64 .tseg1_max = 32, 65 .tseg2_min = 1, 66 .tseg2_max = 16, 67 .sjw_max = 16, 68 .brp_min = 1, 69 .brp_max = 256, 70 .brp_inc = 1, 71 }; 72 73 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) 74 { 75 switch (model) { 76 case MCP251XFD_MODEL_MCP2517FD: 77 return "MCP2517FD"; 78 case MCP251XFD_MODEL_MCP2518FD: 79 return "MCP2518FD"; 80 case MCP251XFD_MODEL_MCP251XFD: 81 return "MCP251xFD"; 82 } 83 84 return "<unknown>"; 85 } 86 87 static inline const char * 88 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv) 89 { 90 return __mcp251xfd_get_model_str(priv->devtype_data.model); 91 } 92 93 static const char *mcp251xfd_get_mode_str(const u8 mode) 94 { 95 switch (mode) { 96 case MCP251XFD_REG_CON_MODE_MIXED: 97 return "Mixed (CAN FD/CAN 2.0)"; 98 case MCP251XFD_REG_CON_MODE_SLEEP: 99 return "Sleep"; 100 case MCP251XFD_REG_CON_MODE_INT_LOOPBACK: 101 return "Internal Loopback"; 102 case MCP251XFD_REG_CON_MODE_LISTENONLY: 103 return "Listen Only"; 104 case MCP251XFD_REG_CON_MODE_CONFIG: 105 return "Configuration"; 106 case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK: 107 return "External Loopback"; 108 case MCP251XFD_REG_CON_MODE_CAN2_0: 109 return "CAN 2.0"; 110 case MCP251XFD_REG_CON_MODE_RESTRICTED: 111 return "Restricted Operation"; 112 } 113 114 return "<unknown>"; 115 } 116 117 static inline int mcp251xfd_vdd_enable(const struct 
mcp251xfd_priv *priv) 118 { 119 if (!priv->reg_vdd) 120 return 0; 121 122 return regulator_enable(priv->reg_vdd); 123 } 124 125 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv) 126 { 127 if (!priv->reg_vdd) 128 return 0; 129 130 return regulator_disable(priv->reg_vdd); 131 } 132 133 static inline int 134 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv) 135 { 136 if (!priv->reg_xceiver) 137 return 0; 138 139 return regulator_enable(priv->reg_xceiver); 140 } 141 142 static inline int 143 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv) 144 { 145 if (!priv->reg_xceiver) 146 return 0; 147 148 return regulator_disable(priv->reg_xceiver); 149 } 150 151 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv) 152 { 153 int err; 154 155 err = clk_prepare_enable(priv->clk); 156 if (err) 157 return err; 158 159 err = mcp251xfd_vdd_enable(priv); 160 if (err) 161 clk_disable_unprepare(priv->clk); 162 163 /* Wait for oscillator stabilisation time after power up */ 164 usleep_range(MCP251XFD_OSC_STAB_SLEEP_US, 165 2 * MCP251XFD_OSC_STAB_SLEEP_US); 166 167 return err; 168 } 169 170 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) 171 { 172 int err; 173 174 err = mcp251xfd_vdd_disable(priv); 175 if (err) 176 return err; 177 178 clk_disable_unprepare(priv->clk); 179 180 return 0; 181 } 182 183 static inline u8 184 mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, 185 union mcp251xfd_write_reg_buf *write_reg_buf, 186 const u16 reg, const u32 mask, const u32 val) 187 { 188 u8 first_byte, last_byte, len; 189 u8 *data; 190 __le32 val_le32; 191 192 first_byte = mcp251xfd_first_byte_set(mask); 193 last_byte = mcp251xfd_last_byte_set(mask); 194 len = last_byte - first_byte + 1; 195 196 data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte); 197 val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); 198 memcpy(data, &val_le32, len); 199 200 if 
(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) { 201 u16 crc; 202 203 mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, 204 len); 205 /* CRC */ 206 len += sizeof(write_reg_buf->crc.cmd); 207 crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); 208 put_unaligned_be16(crc, (void *)write_reg_buf + len); 209 210 /* Total length */ 211 len += sizeof(write_reg_buf->crc.crc); 212 } else { 213 len += sizeof(write_reg_buf->nocrc.cmd); 214 } 215 216 return len; 217 } 218 219 static inline int 220 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, 221 u8 *tef_tail) 222 { 223 u32 tef_ua; 224 int err; 225 226 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua); 227 if (err) 228 return err; 229 230 *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj); 231 232 return 0; 233 } 234 235 static inline int 236 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, 237 u8 *tx_tail) 238 { 239 u32 fifo_sta; 240 int err; 241 242 err = regmap_read(priv->map_reg, 243 MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO), 244 &fifo_sta); 245 if (err) 246 return err; 247 248 *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); 249 250 return 0; 251 } 252 253 static inline int 254 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, 255 const struct mcp251xfd_rx_ring *ring, 256 u8 *rx_head) 257 { 258 u32 fifo_sta; 259 int err; 260 261 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), 262 &fifo_sta); 263 if (err) 264 return err; 265 266 *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); 267 268 return 0; 269 } 270 271 static inline int 272 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv, 273 const struct mcp251xfd_rx_ring *ring, 274 u8 *rx_tail) 275 { 276 u32 fifo_ua; 277 int err; 278 279 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr), 280 &fifo_ua); 281 if (err) 282 return err; 283 284 fifo_ua -= ring->base - MCP251XFD_RAM_START; 285 
*rx_tail = fifo_ua / ring->obj_size; 286 287 return 0; 288 } 289 290 static void 291 mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, 292 const struct mcp251xfd_tx_ring *ring, 293 struct mcp251xfd_tx_obj *tx_obj, 294 const u8 rts_buf_len, 295 const u8 n) 296 { 297 struct spi_transfer *xfer; 298 u16 addr; 299 300 /* FIFO load */ 301 addr = mcp251xfd_get_tx_obj_addr(ring, n); 302 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) 303 mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, 304 addr); 305 else 306 mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, 307 addr); 308 309 xfer = &tx_obj->xfer[0]; 310 xfer->tx_buf = &tx_obj->buf; 311 xfer->len = 0; /* actual len is assigned on the fly */ 312 xfer->cs_change = 1; 313 xfer->cs_change_delay.value = 0; 314 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 315 316 /* FIFO request to send */ 317 xfer = &tx_obj->xfer[1]; 318 xfer->tx_buf = &ring->rts_buf; 319 xfer->len = rts_buf_len; 320 321 /* SPI message */ 322 spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, 323 ARRAY_SIZE(tx_obj->xfer)); 324 } 325 326 static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) 327 { 328 struct mcp251xfd_tef_ring *tef_ring; 329 struct mcp251xfd_tx_ring *tx_ring; 330 struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; 331 struct mcp251xfd_tx_obj *tx_obj; 332 struct spi_transfer *xfer; 333 u32 val; 334 u16 addr; 335 u8 len; 336 int i, j; 337 338 netdev_reset_queue(priv->ndev); 339 340 /* TEF */ 341 tef_ring = priv->tef; 342 tef_ring->head = 0; 343 tef_ring->tail = 0; 344 345 /* FIFO increment TEF tail pointer */ 346 addr = MCP251XFD_REG_TEFCON; 347 val = MCP251XFD_REG_TEFCON_UINC; 348 len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, 349 addr, val, val); 350 351 for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { 352 xfer = &tef_ring->uinc_xfer[j]; 353 xfer->tx_buf = &tef_ring->uinc_buf; 354 xfer->len = len; 355 xfer->cs_change = 1; 356 xfer->cs_change_delay.value = 0; 
357 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 358 } 359 360 /* "cs_change == 1" on the last transfer results in an active 361 * chip select after the complete SPI message. This causes the 362 * controller to interpret the next register access as 363 * data. Set "cs_change" of the last transfer to "0" to 364 * properly deactivate the chip select at the end of the 365 * message. 366 */ 367 xfer->cs_change = 0; 368 369 /* TX */ 370 tx_ring = priv->tx; 371 tx_ring->head = 0; 372 tx_ring->tail = 0; 373 tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num); 374 375 /* FIFO request to send */ 376 addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO); 377 val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; 378 len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, 379 addr, val, val); 380 381 mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) 382 mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); 383 384 /* RX */ 385 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { 386 rx_ring->head = 0; 387 rx_ring->tail = 0; 388 rx_ring->nr = i; 389 rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i); 390 391 if (!prev_rx_ring) 392 rx_ring->base = 393 mcp251xfd_get_tx_obj_addr(tx_ring, 394 tx_ring->obj_num); 395 else 396 rx_ring->base = prev_rx_ring->base + 397 prev_rx_ring->obj_size * 398 prev_rx_ring->obj_num; 399 400 prev_rx_ring = rx_ring; 401 402 /* FIFO increment RX tail pointer */ 403 addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); 404 val = MCP251XFD_REG_FIFOCON_UINC; 405 len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, 406 addr, val, val); 407 408 for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { 409 xfer = &rx_ring->uinc_xfer[j]; 410 xfer->tx_buf = &rx_ring->uinc_buf; 411 xfer->len = len; 412 xfer->cs_change = 1; 413 xfer->cs_change_delay.value = 0; 414 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 415 } 416 417 /* "cs_change == 1" on the last transfer results in an 418 * active chip select after the complete SPI 419 * message. 
This causes the controller to interpret 420 * the next register access as data. Set "cs_change" 421 * of the last transfer to "0" to properly deactivate 422 * the chip select at the end of the message. 423 */ 424 xfer->cs_change = 0; 425 } 426 } 427 428 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) 429 { 430 int i; 431 432 for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { 433 kfree(priv->rx[i]); 434 priv->rx[i] = NULL; 435 } 436 } 437 438 static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) 439 { 440 struct mcp251xfd_tx_ring *tx_ring; 441 struct mcp251xfd_rx_ring *rx_ring; 442 int tef_obj_size, tx_obj_size, rx_obj_size; 443 int tx_obj_num; 444 int ram_free, i; 445 446 tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj); 447 /* listen-only mode works like FD mode */ 448 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) { 449 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD; 450 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); 451 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); 452 } else { 453 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN; 454 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); 455 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); 456 } 457 458 tx_ring = priv->tx; 459 tx_ring->obj_num = tx_obj_num; 460 tx_ring->obj_size = tx_obj_size; 461 462 ram_free = MCP251XFD_RAM_SIZE - tx_obj_num * 463 (tef_obj_size + tx_obj_size); 464 465 for (i = 0; 466 i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size; 467 i++) { 468 int rx_obj_num; 469 470 rx_obj_num = ram_free / rx_obj_size; 471 rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 472 MCP251XFD_RX_OBJ_NUM_MAX); 473 474 rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, 475 GFP_KERNEL); 476 if (!rx_ring) { 477 mcp251xfd_ring_free(priv); 478 return -ENOMEM; 479 } 480 rx_ring->obj_num = rx_obj_num; 481 rx_ring->obj_size = rx_obj_size; 482 priv->rx[i] = rx_ring; 483 484 ram_free -= rx_ring->obj_num * rx_ring->obj_size; 485 } 486 priv->rx_ring_num = i; 487 488 
netdev_dbg(priv->ndev, 489 "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n", 490 tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num, 491 tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num); 492 493 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { 494 netdev_dbg(priv->ndev, 495 "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n", 496 i, rx_ring->obj_num, rx_ring->obj_size, 497 rx_ring->obj_size * rx_ring->obj_num); 498 } 499 500 netdev_dbg(priv->ndev, 501 "FIFO setup: free: %d bytes\n", 502 ram_free); 503 504 return 0; 505 } 506 507 static inline int 508 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) 509 { 510 u32 val; 511 int err; 512 513 err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val); 514 if (err) 515 return err; 516 517 *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val); 518 519 return 0; 520 } 521 522 static int 523 __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, 524 const u8 mode_req, bool nowait) 525 { 526 u32 con, con_reqop; 527 int err; 528 529 con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req); 530 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON, 531 MCP251XFD_REG_CON_REQOP_MASK, con_reqop); 532 if (err) 533 return err; 534 535 if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait) 536 return 0; 537 538 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con, 539 FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, 540 con) == mode_req, 541 MCP251XFD_POLL_SLEEP_US, 542 MCP251XFD_POLL_TIMEOUT_US); 543 if (err) { 544 u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); 545 546 netdev_err(priv->ndev, 547 "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n", 548 mcp251xfd_get_mode_str(mode_req), mode_req, 549 mcp251xfd_get_mode_str(mode), mode); 550 return err; 551 } 552 553 return 0; 554 } 555 556 static inline int 557 mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, 558 const u8 mode_req) 559 { 560 return __mcp251xfd_chip_set_mode(priv, 
mode_req, false); 561 } 562 563 static inline int __maybe_unused 564 mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv, 565 const u8 mode_req) 566 { 567 return __mcp251xfd_chip_set_mode(priv, mode_req, true); 568 } 569 570 static inline bool mcp251xfd_osc_invalid(u32 reg) 571 { 572 return reg == 0x0 || reg == 0xffffffff; 573 } 574 575 static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv) 576 { 577 u32 osc, osc_reference, osc_mask; 578 int err; 579 580 /* Set Power On Defaults for "Clock Output Divisor" and remove 581 * "Oscillator Disable" bit. 582 */ 583 osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 584 MCP251XFD_REG_OSC_CLKODIV_10); 585 osc_reference = MCP251XFD_REG_OSC_OSCRDY; 586 osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; 587 588 /* Note: 589 * 590 * If the controller is in Sleep Mode the following write only 591 * removes the "Oscillator Disable" bit and powers it up. All 592 * other bits are unaffected. 593 */ 594 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); 595 if (err) 596 return err; 597 598 /* Wait for "Oscillator Ready" bit */ 599 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, 600 (osc & osc_mask) == osc_reference, 601 MCP251XFD_OSC_STAB_SLEEP_US, 602 MCP251XFD_OSC_STAB_TIMEOUT_US); 603 if (mcp251xfd_osc_invalid(osc)) { 604 netdev_err(priv->ndev, 605 "Failed to detect %s (osc=0x%08x).\n", 606 mcp251xfd_get_model_str(priv), osc); 607 return -ENODEV; 608 } else if (err == -ETIMEDOUT) { 609 netdev_err(priv->ndev, 610 "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n", 611 osc, osc_reference); 612 return -ETIMEDOUT; 613 } 614 615 return err; 616 } 617 618 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) 619 { 620 const __be16 cmd = mcp251xfd_cmd_reset(); 621 int err; 622 623 /* The Set Mode and SPI Reset command only seems to works if 624 * the controller is not in Sleep Mode. 
625 */ 626 err = mcp251xfd_chip_clock_enable(priv); 627 if (err) 628 return err; 629 630 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); 631 if (err) 632 return err; 633 634 /* spi_write_then_read() works with non DMA-safe buffers */ 635 return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0); 636 } 637 638 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv) 639 { 640 u32 osc, osc_reference; 641 u8 mode; 642 int err; 643 644 err = mcp251xfd_chip_get_mode(priv, &mode); 645 if (err) 646 return err; 647 648 if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { 649 netdev_info(priv->ndev, 650 "Controller not in Config Mode after reset, but in %s Mode (%u).\n", 651 mcp251xfd_get_mode_str(mode), mode); 652 return -ETIMEDOUT; 653 } 654 655 osc_reference = MCP251XFD_REG_OSC_OSCRDY | 656 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 657 MCP251XFD_REG_OSC_CLKODIV_10); 658 659 /* check reset defaults of OSC reg */ 660 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); 661 if (err) 662 return err; 663 664 if (osc != osc_reference) { 665 netdev_info(priv->ndev, 666 "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n", 667 osc, osc_reference); 668 return -ETIMEDOUT; 669 } 670 671 return 0; 672 } 673 674 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) 675 { 676 int err, i; 677 678 for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) { 679 if (i) 680 netdev_info(priv->ndev, 681 "Retrying to reset controller.\n"); 682 683 err = mcp251xfd_chip_softreset_do(priv); 684 if (err == -ETIMEDOUT) 685 continue; 686 if (err) 687 return err; 688 689 err = mcp251xfd_chip_softreset_check(priv); 690 if (err == -ETIMEDOUT) 691 continue; 692 if (err) 693 return err; 694 695 return 0; 696 } 697 698 return err; 699 } 700 701 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) 702 { 703 u32 osc; 704 int err; 705 706 /* Activate Low Power Mode on Oscillator Disable. 
This only 707 * works on the MCP2518FD. The MCP2517FD will go into normal 708 * Sleep Mode instead. 709 */ 710 osc = MCP251XFD_REG_OSC_LPMEN | 711 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 712 MCP251XFD_REG_OSC_CLKODIV_10); 713 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); 714 if (err) 715 return err; 716 717 /* Set Time Base Counter Prescaler to 1. 718 * 719 * This means an overflow of the 32 bit Time Base Counter 720 * register at 40 MHz every 107 seconds. 721 */ 722 return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON, 723 MCP251XFD_REG_TSCON_TBCEN); 724 } 725 726 static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) 727 { 728 const struct can_bittiming *bt = &priv->can.bittiming; 729 const struct can_bittiming *dbt = &priv->can.data_bittiming; 730 u32 val = 0; 731 s8 tdco; 732 int err; 733 734 /* CAN Control Register 735 * 736 * - no transmit bandwidth sharing 737 * - config mode 738 * - disable transmit queue 739 * - store in transmit FIFO event 740 * - transition to restricted operation mode on system error 741 * - ESI is transmitted recessive when ESI of message is high or 742 * CAN controller error passive 743 * - restricted retransmission attempts, 744 * use TQXCON_TXAT and FIFOCON_TXAT 745 * - wake-up filter bits T11FILTER 746 * - use CAN bus line filter for wakeup 747 * - protocol exception is treated as a form error 748 * - Do not compare data bytes 749 */ 750 val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, 751 MCP251XFD_REG_CON_MODE_CONFIG) | 752 MCP251XFD_REG_CON_STEF | 753 MCP251XFD_REG_CON_ESIGM | 754 MCP251XFD_REG_CON_RTXAT | 755 FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, 756 MCP251XFD_REG_CON_WFT_T11FILTER) | 757 MCP251XFD_REG_CON_WAKFIL | 758 MCP251XFD_REG_CON_PXEDIS; 759 760 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) 761 val |= MCP251XFD_REG_CON_ISOCRCEN; 762 763 err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val); 764 if (err) 765 return err; 766 767 /* Nominal Bit Time */ 768 val = 
FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) | 769 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, 770 bt->prop_seg + bt->phase_seg1 - 1) | 771 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, 772 bt->phase_seg2 - 1) | 773 FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1); 774 775 err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val); 776 if (err) 777 return err; 778 779 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) 780 return 0; 781 782 /* Data Bit Time */ 783 val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) | 784 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK, 785 dbt->prop_seg + dbt->phase_seg1 - 1) | 786 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, 787 dbt->phase_seg2 - 1) | 788 FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1); 789 790 err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val); 791 if (err) 792 return err; 793 794 /* Transmitter Delay Compensation */ 795 tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), 796 -64, 63); 797 val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, 798 MCP251XFD_REG_TDC_TDCMOD_AUTO) | 799 FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); 800 801 return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); 802 } 803 804 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv) 805 { 806 u32 val; 807 808 if (!priv->rx_int) 809 return 0; 810 811 /* Configure GPIOs: 812 * - PIN0: GPIO Input 813 * - PIN1: GPIO Input/RX Interrupt 814 * 815 * PIN1 must be Input, otherwise there is a glitch on the 816 * rx-INT line. It happens between setting the PIN as output 817 * (in the first byte of the SPI transfer) and configuring the 818 * PIN as interrupt (in the last byte of the SPI transfer). 
819 */ 820 val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 | 821 MCP251XFD_REG_IOCON_TRIS0; 822 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); 823 } 824 825 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) 826 { 827 u32 val; 828 829 if (!priv->rx_int) 830 return 0; 831 832 /* Configure GPIOs: 833 * - PIN0: GPIO Input 834 * - PIN1: GPIO Input 835 */ 836 val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 | 837 MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0; 838 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); 839 } 840 841 static int 842 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, 843 const struct mcp251xfd_rx_ring *ring) 844 { 845 u32 fifo_con; 846 847 /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. 848 * 849 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will 850 * generate a RXOVIF, use this to properly detect RX MAB 851 * overflows. 852 */ 853 fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, 854 ring->obj_num - 1) | 855 MCP251XFD_REG_FIFOCON_RXTSEN | 856 MCP251XFD_REG_FIFOCON_RXOVIE | 857 MCP251XFD_REG_FIFOCON_TFNRFNIE; 858 859 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) 860 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 861 MCP251XFD_REG_FIFOCON_PLSIZE_64); 862 else 863 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 864 MCP251XFD_REG_FIFOCON_PLSIZE_8); 865 866 return regmap_write(priv->map_reg, 867 MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); 868 } 869 870 static int 871 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, 872 const struct mcp251xfd_rx_ring *ring) 873 { 874 u32 fltcon; 875 876 fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | 877 MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); 878 879 return regmap_update_bits(priv->map_reg, 880 MCP251XFD_REG_FLTCON(ring->nr >> 2), 881 MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), 882 fltcon); 883 } 884 885 static int 
mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) 886 { 887 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; 888 const struct mcp251xfd_rx_ring *rx_ring; 889 u32 val; 890 int err, n; 891 892 /* TEF */ 893 val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, 894 tx_ring->obj_num - 1) | 895 MCP251XFD_REG_TEFCON_TEFTSEN | 896 MCP251XFD_REG_TEFCON_TEFOVIE | 897 MCP251XFD_REG_TEFCON_TEFNEIE; 898 899 err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); 900 if (err) 901 return err; 902 903 /* FIFO 1 - TX */ 904 val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, 905 tx_ring->obj_num - 1) | 906 MCP251XFD_REG_FIFOCON_TXEN | 907 MCP251XFD_REG_FIFOCON_TXATIE; 908 909 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) 910 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 911 MCP251XFD_REG_FIFOCON_PLSIZE_64); 912 else 913 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 914 MCP251XFD_REG_FIFOCON_PLSIZE_8); 915 916 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 917 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, 918 MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); 919 else 920 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, 921 MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); 922 923 err = regmap_write(priv->map_reg, 924 MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO), 925 val); 926 if (err) 927 return err; 928 929 /* RX FIFOs */ 930 mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { 931 err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); 932 if (err) 933 return err; 934 935 err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); 936 if (err) 937 return err; 938 } 939 940 return 0; 941 } 942 943 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) 944 { 945 struct mcp251xfd_ecc *ecc = &priv->ecc; 946 void *ram; 947 u32 val = 0; 948 int err; 949 950 ecc->ecc_stat = 0; 951 952 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC) 953 val = MCP251XFD_REG_ECCCON_ECCEN; 954 955 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, 956 
MCP251XFD_REG_ECCCON_ECCEN, val); 957 if (err) 958 return err; 959 960 ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL); 961 if (!ram) 962 return -ENOMEM; 963 964 err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram, 965 MCP251XFD_RAM_SIZE); 966 kfree(ram); 967 968 return err; 969 } 970 971 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv) 972 { 973 struct mcp251xfd_ecc *ecc = &priv->ecc; 974 975 ecc->ecc_stat = 0; 976 } 977 978 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) 979 { 980 u8 mode; 981 982 983 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 984 mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; 985 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 986 mode = MCP251XFD_REG_CON_MODE_LISTENONLY; 987 else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 988 mode = MCP251XFD_REG_CON_MODE_MIXED; 989 else 990 mode = MCP251XFD_REG_CON_MODE_CAN2_0; 991 992 return mode; 993 } 994 995 static int 996 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv, 997 bool nowait) 998 { 999 u8 mode; 1000 1001 mode = mcp251xfd_get_normal_mode(priv); 1002 1003 return __mcp251xfd_chip_set_mode(priv, mode, nowait); 1004 } 1005 1006 static inline int 1007 mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv) 1008 { 1009 return __mcp251xfd_chip_set_normal_mode(priv, false); 1010 } 1011 1012 static inline int 1013 mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv) 1014 { 1015 return __mcp251xfd_chip_set_normal_mode(priv, true); 1016 } 1017 1018 static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv) 1019 { 1020 u32 val; 1021 int err; 1022 1023 val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE; 1024 err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val); 1025 if (err) 1026 return err; 1027 1028 val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; 1029 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val); 1030 if (err) 1031 
return err; 1032 1033 val = MCP251XFD_REG_INT_CERRIE | 1034 MCP251XFD_REG_INT_SERRIE | 1035 MCP251XFD_REG_INT_RXOVIE | 1036 MCP251XFD_REG_INT_TXATIE | 1037 MCP251XFD_REG_INT_SPICRCIE | 1038 MCP251XFD_REG_INT_ECCIE | 1039 MCP251XFD_REG_INT_TEFIE | 1040 MCP251XFD_REG_INT_MODIE | 1041 MCP251XFD_REG_INT_RXIE; 1042 1043 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 1044 val |= MCP251XFD_REG_INT_IVMIE; 1045 1046 return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val); 1047 } 1048 1049 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv) 1050 { 1051 int err; 1052 u32 mask; 1053 1054 err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0); 1055 if (err) 1056 return err; 1057 1058 mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; 1059 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, 1060 mask, 0x0); 1061 if (err) 1062 return err; 1063 1064 return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0); 1065 } 1066 1067 static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, 1068 const enum can_state state) 1069 { 1070 priv->can.state = state; 1071 1072 mcp251xfd_chip_interrupts_disable(priv); 1073 mcp251xfd_chip_rx_int_disable(priv); 1074 return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 1075 } 1076 1077 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) 1078 { 1079 int err; 1080 1081 err = mcp251xfd_chip_softreset(priv); 1082 if (err) 1083 goto out_chip_stop; 1084 1085 err = mcp251xfd_chip_clock_init(priv); 1086 if (err) 1087 goto out_chip_stop; 1088 1089 err = mcp251xfd_set_bittiming(priv); 1090 if (err) 1091 goto out_chip_stop; 1092 1093 err = mcp251xfd_chip_rx_int_enable(priv); 1094 if (err) 1095 return err; 1096 1097 err = mcp251xfd_chip_ecc_init(priv); 1098 if (err) 1099 goto out_chip_stop; 1100 1101 mcp251xfd_ring_init(priv); 1102 1103 err = mcp251xfd_chip_fifo_init(priv); 1104 if (err) 1105 goto out_chip_stop; 1106 1107 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1108 1109 err = 
mcp251xfd_chip_set_normal_mode(priv); 1110 if (err) 1111 goto out_chip_stop; 1112 1113 return 0; 1114 1115 out_chip_stop: 1116 mcp251xfd_dump(priv); 1117 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 1118 1119 return err; 1120 } 1121 1122 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode) 1123 { 1124 struct mcp251xfd_priv *priv = netdev_priv(ndev); 1125 int err; 1126 1127 switch (mode) { 1128 case CAN_MODE_START: 1129 err = mcp251xfd_chip_start(priv); 1130 if (err) 1131 return err; 1132 1133 err = mcp251xfd_chip_interrupts_enable(priv); 1134 if (err) { 1135 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 1136 return err; 1137 } 1138 1139 netif_wake_queue(ndev); 1140 break; 1141 1142 default: 1143 return -EOPNOTSUPP; 1144 } 1145 1146 return 0; 1147 } 1148 1149 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev, 1150 struct can_berr_counter *bec) 1151 { 1152 const struct mcp251xfd_priv *priv = netdev_priv(ndev); 1153 u32 trec; 1154 int err; 1155 1156 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); 1157 if (err) 1158 return err; 1159 1160 if (trec & MCP251XFD_REG_TREC_TXBO) 1161 bec->txerr = 256; 1162 else 1163 bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec); 1164 bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec); 1165 1166 return 0; 1167 } 1168 1169 static int mcp251xfd_get_berr_counter(const struct net_device *ndev, 1170 struct can_berr_counter *bec) 1171 { 1172 const struct mcp251xfd_priv *priv = netdev_priv(ndev); 1173 1174 /* Avoid waking up the controller if the interface is down */ 1175 if (!(ndev->flags & IFF_UP)) 1176 return 0; 1177 1178 /* The controller is powered down during Bus Off, use saved 1179 * bec values. 
	 */
	if (priv->can.state == CAN_STATE_BUS_OFF) {
		*bec = priv->bec;
		return 0;
	}

	return __mcp251xfd_get_berr_counter(ndev, bec);
}

/* Sanity check (CONFIG_CAN_MCP251XFD_SANITY only): the TEF tail we
 * track in software must match the tail the chip reports.
 */
static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
{
	u8 tef_tail_chip, tef_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	if (tef_tail_chip != tef_tail) {
		netdev_err(priv->ndev,
			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
			   tef_tail_chip, tef_tail);
		return -EILSEQ;
	}

	return 0;
}

/* Sanity check (CONFIG_CAN_MCP251XFD_SANITY only): the RX tail we
 * track in software must match the tail the chip reports.
 */
static int
mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
			const struct mcp251xfd_rx_ring *ring)
{
	u8 rx_tail_chip, rx_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
	if (err)
		return err;

	rx_tail = mcp251xfd_get_rx_tail(ring);
	if (rx_tail_chip != rx_tail) {
		netdev_err(priv->ndev,
			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
			   rx_tail_chip, rx_tail);
		return -EILSEQ;
	}

	return 0;
}

/* Called when a TEF object's sequence number doesn't match our
 * tef_tail: inspect TEFSTA to distinguish a real TEF overflow
 * (-ENOBUFS) from a TEF object read too early (-EAGAIN, caller
 * should retry on the next IRQ).
 */
static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	u32 tef_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
	if (err)
		return err;

	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
		netdev_err(priv->ndev,
			   "Transmit Event FIFO buffer overflow.\n");
		return -ENOBUFS;
	}

	netdev_info(priv->ndev,
		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
		    "not empty" : "empty",
		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);

	/* The Sequence Number in the TEF doesn't match our tef_tail. */
	return -EAGAIN;
}

/* Process a single TEF object: verify its sequence number against
 * our tef_tail, complete the echoed skb (accounting TX bytes and
 * packets) and advance the software TEF tail.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
			   unsigned int *frame_len_ptr)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	u32 seq, seq_masked, tef_tail_masked, tef_tail;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	tef_tail = mcp251xfd_get_tef_tail(priv);
	skb = priv->can.echo_skb[tef_tail];
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    tef_tail, hw_tef_obj->ts,
					    frame_len_ptr);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}

/* Advance the software TEF head from the chip's TX tail, keeping it
 * monotonically increasing and bounded by the TX head.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}

/* Bulk-read 'len' TEF objects starting at 'offset' from the chip
 * into hw_tef_obj, with an optional range sanity check.
 */
static inline int
mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
		       const u8 offset, const u8 len)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const int val_bytes = regmap_get_val_bytes(priv->map_rx);

	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
	    (offset > tx_ring->obj_num ||
	     len > tx_ring->obj_num ||
	     offset + len > tx_ring->obj_num)) {
		netdev_err(priv->ndev,
			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
			   tx_ring->obj_num, offset, len);
		return -ERANGE;
	}

	return regmap_bulk_read(priv->map_rx,
				mcp251xfd_get_tef_obj_addr(offset),
				hw_tef_obj,
				sizeof(*hw_tef_obj) / val_bytes * len);
}

/* TEF interrupt handler: read all pending TEF objects (in up to two
 * linear chunks if the ring wraps), complete the corresponding echo
 * skbs, acknowledge them to the chip via the UINC transfers and wake
 * the TX queue if space became available.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	unsigned int total_frame_len = 0;
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	/* Ring wrapped: read the remaining objects from the start. */
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		unsigned int frame_len = 0;

		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop, let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;

		total_frame_len += frame_len;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		int offset;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		tx_ring->tail += len;
		netdev_completed_queue(priv->ndev, len, total_frame_len);

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}

/* Advance the software RX head from the chip's RX head, keeping it
 * monotonically increasing.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}

/* Convert a hardware RX object into a CAN(-FD) frame inside skb:
 * decode the ID (standard or extended), flags (ESI/BRS/RTR), DLC and
 * data, and attach the hardware timestamp.
 */
static void
mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
			   struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 dlc;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
		u32 sid, eid;

		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);

		cfd->can_id = CAN_EFF_FLAG |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
	} else {
		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
					hw_rx_obj->id);
	}

	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);

	/* CANFD */
	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
			cfd->flags |= CANFD_ESI;

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
			cfd->flags |= CANFD_BRS;

		cfd->len = can_fd_dlc2len(dlc);
	} else {
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
			cfd->can_id |= CAN_RTR_FLAG;

		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
				     priv->can.ctrlmode);
	}

	/* RTR frames carry no payload. */
	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
		memcpy(cfd->data, hw_rx_obj->data, cfd->len);

	mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}

/* Allocate an skb for one received RX object and queue it to the
 * rx-offload layer, sorted by hardware timestamp. Allocation or
 * queueing failures are accounted in the netdev stats, not returned.
 */
static int
mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
			  struct mcp251xfd_rx_ring *ring,
			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct canfd_frame *cfd;
	int err;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
		skb = alloc_canfd_skb(priv->ndev, &cfd);
	else
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);

	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}

/* Bulk-read 'len' RX objects starting at 'offset' from the given RX
 * ring into hw_rx_obj.
 */
static inline int
mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
		      const struct mcp251xfd_rx_ring *ring,
		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
		      const u8 offset, const u8 len)
{
	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
	int err;

	err = regmap_bulk_read(priv->map_rx,
			       mcp251xfd_get_rx_obj_addr(ring, offset),
			       hw_rx_obj,
			       len * ring->obj_size / val_bytes);

	return err;
}

/* Drain one RX ring: repeatedly read the linear (non-wrapping) part
 * of the pending objects, hand each to the rx-offload layer and
 * acknowledge them to the chip via the UINC transfers.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		int offset;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}

/* RX interrupt handler: drain every RX ring in turn. */
static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_rx_ring *ring;
	int err, n;

	mcp251xfd_for_each_rx_ring(priv, ring, n) {
		err = mcp251xfd_handle_rxif_ring(priv, ring);
		if (err)
			return err;
	}

	return 0;
}

/* Allocate an error skb stamped with the current hardware timestamp.
 * Returns NULL if the timestamp cannot be read or allocation fails;
 * *timestamp is only valid when the timestamp read succeeded.
 */
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
			    struct can_frame **cf, u32 *timestamp)
{
	struct sk_buff *skb;
	int err;

	err = mcp251xfd_get_timestamp(priv, timestamp);
	if (err)
		return NULL;

	skb = alloc_can_err_skb(priv->ndev, cf);
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);

	return skb;
}

/* RX overflow interrupt handler: account the overflow, clear the
 * RXOVIF flag on every affected FIFO and queue a controller-error
 * frame to userspace (best effort).
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}

/* TX attempt interrupt handler: currently only logged. */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}

/* Invalid message (bus error) interrupt handler: decode BDIAG1 into
 * a CAN error frame, update the error statistics and queue the frame
 * to userspace.
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	/* cf may stay NULL on allocation failure; the stats below are
	 * still updated, only the error frame is skipped.
	 */
	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}

/* CAN error state interrupt handler: derive the TX/RX error states
 * from the TREC register, propagate a state change via
 * can_change_state() and, on Bus Off, save the error counters and
 * shut the chip down.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}

/* Mode change interrupt handler: check the new mode against the
 * expected "normal" mode and, unless an ECC error must be recovered
 * first (see mcp251xfd_handle_eccif()), request Normal mode again.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}

/* System error interrupt handler: classify the SERRIF based on the
 * other interrupt flags captured in priv->regs_status.intf.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}

/* Recover from a persistent ECC error in TX-RAM: after a sanity
 * check of the TX tail, rewrite the affected tx-object into the
 * controller RAM and trigger a retransmit by requesting Normal mode.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ...
and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}

/* ECC interrupt handler: read and acknowledge ECCSTAT, log the
 * single/double error and, if the error is in TX-RAM and keeps
 * re-occurring, recover via mcp251xfd_handle_eccif_recover().
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Write 0 to the active IF bits to acknowledge them. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}

/* SPI CRC interrupt handler: read and acknowledge the CRC register
 * and log format or CRC errors of write commands.
 */
static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
{
	int err;
	u32 crc;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
	if (err)
		return err;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
				 MCP251XFD_REG_CRC_IF_MASK,
				 ~crc);
	if (err)
		return err;

	if (crc & MCP251XFD_REG_CRC_FERRIF)
		netdev_notice(priv->ndev, "CRC write command format error.\n");
	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
		netdev_notice(priv->ndev,
			      "CRC write error detected. CRC=0x%04lx.\n",
			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));

	return 0;
}

/* Call the mcp251xfd_handle_<irq>() handler and log its name and
 * error code on failure; evaluates to the handler's return value.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			   "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			   __stringify(irq), err); \
	err; \
})

/* Threaded IRQ handler: first drain the RX rings as long as the
 * dedicated RX-INT pin is active, then loop reading the INT register
 * and dispatching to the individual interrupt handlers until no
 * enabled interrupt is pending.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only handle interrupts that are both flagged and
		 * enabled.
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
2208 */ 2209 intf_pending_clearable = intf_pending & 2210 MCP251XFD_REG_INT_IF_CLEARABLE_MASK; 2211 if (intf_pending_clearable) { 2212 err = regmap_update_bits(priv->map_reg, 2213 MCP251XFD_REG_INT, 2214 MCP251XFD_REG_INT_IF_MASK, 2215 ~intf_pending_clearable); 2216 if (err) 2217 goto out_fail; 2218 } 2219 2220 if (intf_pending & MCP251XFD_REG_INT_MODIF) { 2221 err = mcp251xfd_handle(priv, modif, &set_normal_mode); 2222 if (err) 2223 goto out_fail; 2224 } 2225 2226 if (intf_pending & MCP251XFD_REG_INT_RXIF) { 2227 err = mcp251xfd_handle(priv, rxif); 2228 if (err) 2229 goto out_fail; 2230 } 2231 2232 if (intf_pending & MCP251XFD_REG_INT_TEFIF) { 2233 err = mcp251xfd_handle(priv, tefif); 2234 if (err) 2235 goto out_fail; 2236 } 2237 2238 if (intf_pending & MCP251XFD_REG_INT_RXOVIF) { 2239 err = mcp251xfd_handle(priv, rxovif); 2240 if (err) 2241 goto out_fail; 2242 } 2243 2244 if (intf_pending & MCP251XFD_REG_INT_TXATIF) { 2245 err = mcp251xfd_handle(priv, txatif); 2246 if (err) 2247 goto out_fail; 2248 } 2249 2250 if (intf_pending & MCP251XFD_REG_INT_IVMIF) { 2251 err = mcp251xfd_handle(priv, ivmif); 2252 if (err) 2253 goto out_fail; 2254 } 2255 2256 if (intf_pending & MCP251XFD_REG_INT_SERRIF) { 2257 err = mcp251xfd_handle(priv, serrif); 2258 if (err) 2259 goto out_fail; 2260 } 2261 2262 if (intf_pending & MCP251XFD_REG_INT_ECCIF) { 2263 err = mcp251xfd_handle(priv, eccif, set_normal_mode); 2264 if (err) 2265 goto out_fail; 2266 } 2267 2268 if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) { 2269 err = mcp251xfd_handle(priv, spicrcif); 2270 if (err) 2271 goto out_fail; 2272 } 2273 2274 /* On the MCP2527FD and MCP2518FD, we don't get a 2275 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX 2276 * ERROR_ACTIVE. 2277 */ 2278 if (intf_pending & MCP251XFD_REG_INT_CERRIF || 2279 priv->can.state > CAN_STATE_ERROR_ACTIVE) { 2280 err = mcp251xfd_handle(priv, cerrif); 2281 if (err) 2282 goto out_fail; 2283 2284 /* In Bus Off we completely shut down the 2285 * controller. 
Every subsequent register read 2286 * will read bogus data, and if 2287 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC 2288 * check will fail, too. So leave IRQ handler 2289 * directly. 2290 */ 2291 if (priv->can.state == CAN_STATE_BUS_OFF) 2292 return IRQ_HANDLED; 2293 } 2294 2295 handled = IRQ_HANDLED; 2296 } while (1); 2297 2298 out_fail: 2299 netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", 2300 err, priv->regs_status.intf); 2301 mcp251xfd_dump(priv); 2302 mcp251xfd_chip_interrupts_disable(priv); 2303 mcp251xfd_timestamp_stop(priv); 2304 2305 return handled; 2306 } 2307 2308 static inline struct 2309 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) 2310 { 2311 u8 tx_head; 2312 2313 tx_head = mcp251xfd_get_tx_head(tx_ring); 2314 2315 return &tx_ring->obj[tx_head]; 2316 } 2317 2318 static void 2319 mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, 2320 struct mcp251xfd_tx_obj *tx_obj, 2321 const struct sk_buff *skb, 2322 unsigned int seq) 2323 { 2324 const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 2325 struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj; 2326 union mcp251xfd_tx_obj_load_buf *load_buf; 2327 u8 dlc; 2328 u32 id, flags; 2329 int len_sanitized = 0, len; 2330 2331 if (cfd->can_id & CAN_EFF_FLAG) { 2332 u32 sid, eid; 2333 2334 sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id); 2335 eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id); 2336 2337 id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) | 2338 FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid); 2339 2340 flags = MCP251XFD_OBJ_FLAGS_IDE; 2341 } else { 2342 id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id); 2343 flags = 0; 2344 } 2345 2346 /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't 2347 * harm, only the lower 7 bits will be transferred into the 2348 * TEF object. 
 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The in-memory layout differs depending on whether a CRC
	 * command frame is used for the RAM write.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	/* Final SPI transfer length: command (+ optional CRC) + object */
	tx_obj->xfer[0].len = len;
}

/* Kick off the (pre-initialized) asynchronous SPI message that loads
 * @tx_obj into the controller's RAM. Called from the xmit path.
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}

/* Return true (and stop the netdev queue) if the TX ring is full.
 * The stop/recheck/restart dance avoids a race against the TX
 * completion path freeing up space concurrently.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}

/* ndo_start_xmit: convert the skb into a HW TX object, queue it to the
 * controller via async SPI and account it in the echo/byte queue.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); 2491 2492 return NETDEV_TX_OK; 2493 } 2494 2495 static int mcp251xfd_open(struct net_device *ndev) 2496 { 2497 struct mcp251xfd_priv *priv = netdev_priv(ndev); 2498 const struct spi_device *spi = priv->spi; 2499 int err; 2500 2501 err = pm_runtime_get_sync(ndev->dev.parent); 2502 if (err < 0) { 2503 pm_runtime_put_noidle(ndev->dev.parent); 2504 return err; 2505 } 2506 2507 err = open_candev(ndev); 2508 if (err) 2509 goto out_pm_runtime_put; 2510 2511 err = mcp251xfd_ring_alloc(priv); 2512 if (err) 2513 goto out_close_candev; 2514 2515 err = mcp251xfd_transceiver_enable(priv); 2516 if (err) 2517 goto out_mcp251xfd_ring_free; 2518 2519 err = mcp251xfd_chip_start(priv); 2520 if (err) 2521 goto out_transceiver_disable; 2522 2523 mcp251xfd_timestamp_init(priv); 2524 can_rx_offload_enable(&priv->offload); 2525 2526 err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, 2527 IRQF_ONESHOT, dev_name(&spi->dev), 2528 priv); 2529 if (err) 2530 goto out_can_rx_offload_disable; 2531 2532 err = mcp251xfd_chip_interrupts_enable(priv); 2533 if (err) 2534 goto out_free_irq; 2535 2536 netif_start_queue(ndev); 2537 2538 return 0; 2539 2540 out_free_irq: 2541 free_irq(spi->irq, priv); 2542 out_can_rx_offload_disable: 2543 can_rx_offload_disable(&priv->offload); 2544 mcp251xfd_timestamp_stop(priv); 2545 out_transceiver_disable: 2546 mcp251xfd_transceiver_disable(priv); 2547 out_mcp251xfd_ring_free: 2548 mcp251xfd_ring_free(priv); 2549 out_close_candev: 2550 close_candev(ndev); 2551 out_pm_runtime_put: 2552 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 2553 pm_runtime_put(ndev->dev.parent); 2554 2555 return err; 2556 } 2557 2558 static int mcp251xfd_stop(struct net_device *ndev) 2559 { 2560 struct mcp251xfd_priv *priv = netdev_priv(ndev); 2561 2562 netif_stop_queue(ndev); 2563 mcp251xfd_chip_interrupts_disable(priv); 2564 free_irq(ndev->irq, priv); 2565 can_rx_offload_disable(&priv->offload); 2566 
mcp251xfd_timestamp_stop(priv); 2567 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 2568 mcp251xfd_transceiver_disable(priv); 2569 mcp251xfd_ring_free(priv); 2570 close_candev(ndev); 2571 2572 pm_runtime_put(ndev->dev.parent); 2573 2574 return 0; 2575 } 2576 2577 static const struct net_device_ops mcp251xfd_netdev_ops = { 2578 .ndo_open = mcp251xfd_open, 2579 .ndo_stop = mcp251xfd_stop, 2580 .ndo_start_xmit = mcp251xfd_start_xmit, 2581 .ndo_change_mtu = can_change_mtu, 2582 }; 2583 2584 static void 2585 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv) 2586 { 2587 const struct spi_device *spi = priv->spi; 2588 const struct spi_controller *ctlr = spi->controller; 2589 2590 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) 2591 priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX; 2592 } 2593 2594 static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) 2595 { 2596 const struct net_device *ndev = priv->ndev; 2597 const struct mcp251xfd_devtype_data *devtype_data; 2598 u32 osc; 2599 int err; 2600 2601 /* The OSC_LPMEN is only supported on MCP2518FD, so use it to 2602 * autodetect the model. 2603 */ 2604 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC, 2605 MCP251XFD_REG_OSC_LPMEN, 2606 MCP251XFD_REG_OSC_LPMEN); 2607 if (err) 2608 return err; 2609 2610 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); 2611 if (err) 2612 return err; 2613 2614 if (osc & MCP251XFD_REG_OSC_LPMEN) 2615 devtype_data = &mcp251xfd_devtype_data_mcp2518fd; 2616 else 2617 devtype_data = &mcp251xfd_devtype_data_mcp2517fd; 2618 2619 if (!mcp251xfd_is_251X(priv) && 2620 priv->devtype_data.model != devtype_data->model) { 2621 netdev_info(ndev, 2622 "Detected %s, but firmware specifies a %s. Fixing up.", 2623 __mcp251xfd_get_model_str(devtype_data->model), 2624 mcp251xfd_get_model_str(priv)); 2625 } 2626 priv->devtype_data = *devtype_data; 2627 2628 /* We need to preserve the Half Duplex Quirk. 
*/ 2629 mcp251xfd_register_quirks(priv); 2630 2631 /* Re-init regmap with quirks of detected model. */ 2632 return mcp251xfd_regmap_init(priv); 2633 } 2634 2635 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) 2636 { 2637 int err, rx_pending; 2638 2639 if (!priv->rx_int) 2640 return 0; 2641 2642 err = mcp251xfd_chip_rx_int_enable(priv); 2643 if (err) 2644 return err; 2645 2646 /* Check if RX_INT is properly working. The RX_INT should not 2647 * be active after a softreset. 2648 */ 2649 rx_pending = gpiod_get_value_cansleep(priv->rx_int); 2650 2651 err = mcp251xfd_chip_rx_int_disable(priv); 2652 if (err) 2653 return err; 2654 2655 if (!rx_pending) 2656 return 0; 2657 2658 netdev_info(priv->ndev, 2659 "RX_INT active after softreset, disabling RX_INT support."); 2660 devm_gpiod_put(&priv->spi->dev, priv->rx_int); 2661 priv->rx_int = NULL; 2662 2663 return 0; 2664 } 2665 2666 static int 2667 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, 2668 u32 *dev_id, u32 *effective_speed_hz) 2669 { 2670 struct mcp251xfd_map_buf_nocrc *buf_rx; 2671 struct mcp251xfd_map_buf_nocrc *buf_tx; 2672 struct spi_transfer xfer[2] = { }; 2673 int err; 2674 2675 buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL); 2676 if (!buf_rx) 2677 return -ENOMEM; 2678 2679 buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL); 2680 if (!buf_tx) { 2681 err = -ENOMEM; 2682 goto out_kfree_buf_rx; 2683 } 2684 2685 xfer[0].tx_buf = buf_tx; 2686 xfer[0].len = sizeof(buf_tx->cmd); 2687 xfer[1].rx_buf = buf_rx->data; 2688 xfer[1].len = sizeof(dev_id); 2689 2690 mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); 2691 err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer)); 2692 if (err) 2693 goto out_kfree_buf_tx; 2694 2695 *dev_id = be32_to_cpup((__be32 *)buf_rx->data); 2696 *effective_speed_hz = xfer->effective_speed_hz; 2697 2698 out_kfree_buf_tx: 2699 kfree(buf_tx); 2700 out_kfree_buf_rx: 2701 kfree(buf_rx); 2702 2703 return 0; 2704 } 2705 2706 #define 
MCP251XFD_QUIRK_ACTIVE(quirk) \ 2707 (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-') 2708 2709 static int 2710 mcp251xfd_register_done(const struct mcp251xfd_priv *priv) 2711 { 2712 u32 dev_id, effective_speed_hz; 2713 int err; 2714 2715 err = mcp251xfd_register_get_dev_id(priv, &dev_id, 2716 &effective_speed_hz); 2717 if (err) 2718 return err; 2719 2720 netdev_info(priv->ndev, 2721 "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n", 2722 mcp251xfd_get_model_str(priv), 2723 FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id), 2724 FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id), 2725 priv->rx_int ? '+' : '-', 2726 MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN), 2727 MCP251XFD_QUIRK_ACTIVE(CRC_REG), 2728 MCP251XFD_QUIRK_ACTIVE(CRC_RX), 2729 MCP251XFD_QUIRK_ACTIVE(CRC_TX), 2730 MCP251XFD_QUIRK_ACTIVE(ECC), 2731 MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX), 2732 priv->can.clock.freq / 1000000, 2733 priv->can.clock.freq % 1000000 / 1000 / 10, 2734 priv->spi_max_speed_hz_orig / 1000000, 2735 priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10, 2736 priv->spi->max_speed_hz / 1000000, 2737 priv->spi->max_speed_hz % 1000000 / 1000 / 10, 2738 effective_speed_hz / 1000000, 2739 effective_speed_hz % 1000000 / 1000 / 10); 2740 2741 return 0; 2742 } 2743 2744 static int mcp251xfd_register(struct mcp251xfd_priv *priv) 2745 { 2746 struct net_device *ndev = priv->ndev; 2747 int err; 2748 2749 err = mcp251xfd_clks_and_vdd_enable(priv); 2750 if (err) 2751 return err; 2752 2753 pm_runtime_get_noresume(ndev->dev.parent); 2754 err = pm_runtime_set_active(ndev->dev.parent); 2755 if (err) 2756 goto out_runtime_put_noidle; 2757 pm_runtime_enable(ndev->dev.parent); 2758 2759 mcp251xfd_register_quirks(priv); 2760 2761 err = mcp251xfd_chip_softreset(priv); 2762 if (err == -ENODEV) 2763 goto out_runtime_disable; 2764 if (err) 2765 goto out_chip_set_mode_sleep; 2766 2767 err = 
mcp251xfd_register_chip_detect(priv); 2768 if (err) 2769 goto out_chip_set_mode_sleep; 2770 2771 err = mcp251xfd_register_check_rx_int(priv); 2772 if (err) 2773 goto out_chip_set_mode_sleep; 2774 2775 err = register_candev(ndev); 2776 if (err) 2777 goto out_chip_set_mode_sleep; 2778 2779 err = mcp251xfd_register_done(priv); 2780 if (err) 2781 goto out_unregister_candev; 2782 2783 /* Put controller into sleep mode and let pm_runtime_put() 2784 * disable the clocks and vdd. If CONFIG_PM is not enabled, 2785 * the clocks and vdd will stay powered. 2786 */ 2787 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 2788 if (err) 2789 goto out_unregister_candev; 2790 2791 pm_runtime_put(ndev->dev.parent); 2792 2793 return 0; 2794 2795 out_unregister_candev: 2796 unregister_candev(ndev); 2797 out_chip_set_mode_sleep: 2798 mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 2799 out_runtime_disable: 2800 pm_runtime_disable(ndev->dev.parent); 2801 out_runtime_put_noidle: 2802 pm_runtime_put_noidle(ndev->dev.parent); 2803 mcp251xfd_clks_and_vdd_disable(priv); 2804 2805 return err; 2806 } 2807 2808 static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) 2809 { 2810 struct net_device *ndev = priv->ndev; 2811 2812 unregister_candev(ndev); 2813 2814 pm_runtime_get_sync(ndev->dev.parent); 2815 pm_runtime_put_noidle(ndev->dev.parent); 2816 mcp251xfd_clks_and_vdd_disable(priv); 2817 pm_runtime_disable(ndev->dev.parent); 2818 } 2819 2820 static const struct of_device_id mcp251xfd_of_match[] = { 2821 { 2822 .compatible = "microchip,mcp2517fd", 2823 .data = &mcp251xfd_devtype_data_mcp2517fd, 2824 }, { 2825 .compatible = "microchip,mcp2518fd", 2826 .data = &mcp251xfd_devtype_data_mcp2518fd, 2827 }, { 2828 .compatible = "microchip,mcp251xfd", 2829 .data = &mcp251xfd_devtype_data_mcp251xfd, 2830 }, { 2831 /* sentinel */ 2832 }, 2833 }; 2834 MODULE_DEVICE_TABLE(of, mcp251xfd_of_match); 2835 2836 static const struct spi_device_id mcp251xfd_id_table[] = { 
{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);

/* Probe: gather resources (IRQ, optional RX-INT GPIO, optional vdd and
 * transceiver regulators, oscillator clock), validate the clock rate,
 * allocate and populate the CAN netdev, apply the SPI-speed erratum
 * work around and finally register the device.
 */
static int mcp251xfd_probe(struct spi_device *spi)
{
	const void *match;
	struct net_device *ndev;
	struct mcp251xfd_priv *priv;
	struct gpio_desc *rx_int;
	struct regulator *reg_vdd, *reg_xceiver;
	struct clk *clk;
	u32 freq;
	int err;

	if (!spi->irq)
		return dev_err_probe(&spi->dev, -ENXIO,
				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");

	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
					 GPIOD_IN);
	if (IS_ERR(rx_int))
		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
				     "Failed to get RX-INT!\n");

	/* -ENODEV means the regulator is simply not described: treat
	 * it as absent, any other error aborts the probe.
	 */
	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
	if (PTR_ERR(reg_vdd) == -ENODEV)
		reg_vdd = NULL;
	else if (IS_ERR(reg_vdd))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
				     "Failed to get VDD regulator!\n");

	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
	if (PTR_ERR(reg_xceiver) == -ENODEV)
		reg_xceiver = NULL;
	else if (IS_ERR(reg_xceiver))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
				     "Failed to get Transceiver regulator!\n");

	clk = devm_clk_get(&spi->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&spi->dev, PTR_ERR(clk),
				     "Failed to get Oscillator (clock)!\n");
	freq = clk_get_rate(clk);

	/* Sanity check */
	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low or high.\n",
			freq);
		return -ERANGE;
	}

	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
			freq);
		return -ERANGE;
	}

	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
			    MCP251XFD_TX_OBJ_NUM_MAX);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &spi->dev);

	ndev->netdev_ops = &mcp251xfd_netdev_ops;
	ndev->irq = spi->irq;
	ndev->flags |= IFF_ECHO;

	priv = netdev_priv(ndev);
	spi_set_drvdata(spi, priv);
	priv->can.clock.freq = freq;
	priv->can.do_set_mode = mcp251xfd_set_mode;
	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
		CAN_CTRLMODE_CC_LEN8_DLC;
	priv->ndev = ndev;
	priv->spi = spi;
	priv->rx_int = rx_int;
	priv->clk = clk;
	priv->reg_vdd = reg_vdd;
	priv->reg_xceiver = reg_xceiver;

	/* Prefer OF match data, fall back to the SPI device ID table */
	match = device_get_match_data(&spi->dev);
	if (match)
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
	else
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
			spi_get_device_id(spi)->driver_data;

	/* Errata Reference:
	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
	 *
	 * The SPI can write corrupted data to the RAM at fast SPI
	 * speeds:
	 *
	 * Simultaneous activity on the CAN bus while writing data to
	 * RAM via the SPI interface, with high SCK frequency, can
	 * lead to corrupted data being written to RAM.
	 *
	 * Fix/Work Around:
	 * Ensure that FSCK is less than or equal to 0.85 *
	 * (FSYSCLK/2).
	 *
	 * Known good combinations are:
	 *
	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	config
	 *
	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
	 * 2518	40 MHz	fsl,imx6dl		fsl,imx51-ecspi		15000000 Hz	 75.00%	 30000000 Hz	default
	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
	 *
	 */
	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
	/* 0.85 * freq/2, computed in kHz to avoid 32 bit overflow */
	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
	spi->bits_per_word = 8;
	spi->rt = true;
	err = spi_setup(spi);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_regmap_init(priv);
	if (err)
		goto out_free_candev;

	err = can_rx_offload_add_manual(ndev, &priv->offload,
					MCP251XFD_NAPI_WEIGHT);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_register(priv);
	if (err)
		goto out_can_rx_offload_del;

	return 0;

 out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
 out_free_candev:
	/* Restore the SPI speed clamped by the erratum work around */
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;

	free_candev(ndev);

	return err;
}

/* Remove: reverse of probe */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}

/* Runtime PM: gate the external clocks and supplies while the
 * interface is down.
 */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}

static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}

static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};

static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
MODULE_LICENSE("GPL v2");