1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 // 5 // Copyright (c) 2019, 2020 Pengutronix, 6 // Marc Kleine-Budde <kernel@pengutronix.de> 7 // 8 // Based on: 9 // 10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface 11 // 12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> 13 // 14 15 #include <linux/bitfield.h> 16 #include <linux/clk.h> 17 #include <linux/device.h> 18 #include <linux/module.h> 19 #include <linux/netdevice.h> 20 #include <linux/of.h> 21 #include <linux/of_device.h> 22 #include <linux/pm_runtime.h> 23 24 #include <asm/unaligned.h> 25 26 #include "mcp251xfd.h" 27 28 #define DEVICE_NAME "mcp251xfd" 29 30 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = { 31 .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG | 32 MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | 33 MCP251XFD_QUIRK_ECC, 34 .model = MCP251XFD_MODEL_MCP2517FD, 35 }; 36 37 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { 38 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | 39 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, 40 .model = MCP251XFD_MODEL_MCP2518FD, 41 }; 42 43 /* Autodetect model, start with CRC enabled. */ 44 static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { 45 .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | 46 MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, 47 .model = MCP251XFD_MODEL_MCP251XFD, 48 }; 49 50 static const struct can_bittiming_const mcp251xfd_bittiming_const = { 51 .name = DEVICE_NAME, 52 .tseg1_min = 2, 53 .tseg1_max = 256, 54 .tseg2_min = 1, 55 .tseg2_max = 128, 56 .sjw_max = 128, 57 .brp_min = 1, 58 .brp_max = 256, 59 .brp_inc = 1, 60 }; 61 62 static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { 63 .name = DEVICE_NAME, 64 .tseg1_min = 1, 65 .tseg1_max = 32, 66 .tseg2_min = 1, 67 .tseg2_max = 16, 68 .sjw_max = 16, 69 .brp_min = 1, 70 .brp_max = 256, 71 .brp_inc = 1, 72 }; 73 74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) 75 { 76 switch (model) { 77 case MCP251XFD_MODEL_MCP2517FD: 78 return "MCP2517FD"; 79 case MCP251XFD_MODEL_MCP2518FD: 80 return "MCP2518FD"; 81 case MCP251XFD_MODEL_MCP251XFD: 82 return "MCP251xFD"; 83 } 84 85 return "<unknown>"; 86 } 87 88 static inline const char * 89 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv) 90 { 91 return __mcp251xfd_get_model_str(priv->devtype_data.model); 92 } 93 94 static const char *mcp251xfd_get_mode_str(const u8 mode) 95 { 96 switch (mode) { 97 case MCP251XFD_REG_CON_MODE_MIXED: 98 return "Mixed (CAN FD/CAN 2.0)"; 99 case MCP251XFD_REG_CON_MODE_SLEEP: 100 return "Sleep"; 101 case MCP251XFD_REG_CON_MODE_INT_LOOPBACK: 102 return "Internal Loopback"; 103 case MCP251XFD_REG_CON_MODE_LISTENONLY: 104 return "Listen Only"; 105 case MCP251XFD_REG_CON_MODE_CONFIG: 106 return "Configuration"; 107 case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK: 108 return "External Loopback"; 109 case MCP251XFD_REG_CON_MODE_CAN2_0: 110 return "CAN 2.0"; 111 case MCP251XFD_REG_CON_MODE_RESTRICTED: 112 return "Restricted Operation"; 113 } 114 115 return "<unknown>"; 116 } 117 118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv) 119 { 120 if (!priv->reg_vdd) 121 return 0; 122 123 return regulator_enable(priv->reg_vdd); 124 } 125 126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv) 127 { 128 if (!priv->reg_vdd) 129 return 0; 130 131 return regulator_disable(priv->reg_vdd); 132 } 133 134 static inline int 135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv) 136 { 137 if (!priv->reg_xceiver) 138 return 0; 139 140 return regulator_enable(priv->reg_xceiver); 141 } 142 143 static inline int 144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv) 145 { 146 if (!priv->reg_xceiver) 147 return 0; 148 149 return regulator_disable(priv->reg_xceiver); 150 } 151 152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv) 153 { 154 int err; 155 156 err = clk_prepare_enable(priv->clk); 157 if (err) 158 return err; 159 160 err = mcp251xfd_vdd_enable(priv); 161 if (err) 162 clk_disable_unprepare(priv->clk); 163 164 /* Wait for oscillator stabilisation time after power up */ 165 usleep_range(MCP251XFD_OSC_STAB_SLEEP_US, 166 2 * MCP251XFD_OSC_STAB_SLEEP_US); 167 168 return err; 169 } 170 171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) 172 { 173 int err; 174 175 err = mcp251xfd_vdd_disable(priv); 176 if (err) 177 return err; 178 179 clk_disable_unprepare(priv->clk); 180 181 return 0; 182 } 183 184 static inline u8 185 mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, 186 union mcp251xfd_write_reg_buf *write_reg_buf, 187 const u16 reg, const u32 mask, const u32 val) 188 { 189 u8 first_byte, last_byte, len; 190 u8 *data; 191 __le32 val_le32; 192 193 first_byte = mcp251xfd_first_byte_set(mask); 194 last_byte = mcp251xfd_last_byte_set(mask); 195 len = last_byte - first_byte + 1; 196 197 data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte); 198 val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); 199 memcpy(data, &val_le32, len); 200 201 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) { 202 u16 crc; 203 204 mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, 205 len); 206 /* CRC */ 207 len += sizeof(write_reg_buf->crc.cmd); 208 crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); 209 put_unaligned_be16(crc, (void *)write_reg_buf + len); 210 211 /* Total length */ 212 len += sizeof(write_reg_buf->crc.crc); 213 } else { 214 len += sizeof(write_reg_buf->nocrc.cmd); 215 } 216 217 return len; 218 } 219 220 static inline int 221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, 222 u8 *tef_tail) 223 { 224 u32 tef_ua; 225 int err; 226 227 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua); 228 if (err) 229 return err; 230 231 *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj); 232 233 return 0; 234 } 235 236 static inline int 237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, 238 u8 *tx_tail) 239 { 240 u32 fifo_sta; 241 int err; 242 243 err = regmap_read(priv->map_reg, 244 MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO), 245 &fifo_sta); 246 if (err) 247 return err; 248 249 *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); 250 251 return 0; 252 } 253 254 static inline int 255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, 256 const struct mcp251xfd_rx_ring *ring, 257 u8 *rx_head) 258 { 259 u32 fifo_sta; 260 int err; 261 262 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), 263 &fifo_sta); 264 if (err) 265 return err; 266 267 *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); 268 269 return 0; 270 } 271 272 static inline int 273 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv, 274 const struct mcp251xfd_rx_ring *ring, 275 u8 *rx_tail) 276 { 277 u32 fifo_ua; 278 int err; 279 280 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr), 281 &fifo_ua); 282 if (err) 283 return err; 284 285 fifo_ua -= ring->base - MCP251XFD_RAM_START; 286 *rx_tail = fifo_ua / ring->obj_size; 287 288 return 0; 289 } 290 291 static void 292 mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, 293 const struct mcp251xfd_tx_ring *ring, 294 struct mcp251xfd_tx_obj *tx_obj, 295 const u8 rts_buf_len, 296 const u8 n) 297 { 298 struct spi_transfer *xfer; 299 u16 addr; 300 301 /* FIFO load */ 302 addr = mcp251xfd_get_tx_obj_addr(ring, n); 303 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) 304 mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, 305 addr); 306 else 307 mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, 308 addr); 309 310 xfer = &tx_obj->xfer[0]; 311 xfer->tx_buf = &tx_obj->buf; 312 xfer->len = 0; /* actual len is assigned on the fly */ 313 xfer->cs_change = 1; 314 xfer->cs_change_delay.value = 0; 315 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 316 317 /* FIFO request to send */ 318 xfer = &tx_obj->xfer[1]; 319 xfer->tx_buf = &ring->rts_buf; 320 xfer->len = rts_buf_len; 321 322 /* SPI message */ 323 spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, 324 ARRAY_SIZE(tx_obj->xfer)); 325 } 326 327 static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) 328 { 329 struct mcp251xfd_tef_ring *tef_ring; 330 struct mcp251xfd_tx_ring *tx_ring; 331 struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; 332 struct mcp251xfd_tx_obj *tx_obj; 333 u32 val; 334 u16 addr; 335 u8 len; 336 int i, j; 337 338 /* TEF */ 339 tef_ring = priv->tef; 340 tef_ring->head = 0; 341 tef_ring->tail = 0; 342 343 /* FIFO increment TEF tail pointer */ 344 addr = MCP251XFD_REG_TEFCON; 345 val = MCP251XFD_REG_TEFCON_UINC; 346 len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, 347 addr, val, val); 348 349 for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { 350 struct spi_transfer *xfer; 351 352 xfer = &tef_ring->uinc_xfer[j]; 353 xfer->tx_buf = &tef_ring->uinc_buf; 354 xfer->len = len; 355 xfer->cs_change = 1; 356 xfer->cs_change_delay.value = 0; 357 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 358 } 359 360 /* TX */ 361 tx_ring = priv->tx; 362 tx_ring->head = 0; 363 tx_ring->tail = 0; 364 tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num); 365 366 /* FIFO request to send */ 367 addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO); 368 val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; 369 len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, 370 addr, val, val); 371 372 mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) 373 mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); 374 375 /* RX */ 376 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { 377 rx_ring->head = 0; 378 rx_ring->tail = 0; 379 rx_ring->nr = i; 380 rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i); 381 382 if (!prev_rx_ring) 383 rx_ring->base = 384 mcp251xfd_get_tx_obj_addr(tx_ring, 385 tx_ring->obj_num); 386 else 387 rx_ring->base = prev_rx_ring->base + 388 prev_rx_ring->obj_size * 389 prev_rx_ring->obj_num; 390 391 prev_rx_ring = rx_ring; 392 393 /* FIFO increment RX tail pointer */ 394 addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); 395 val = MCP251XFD_REG_FIFOCON_UINC; 396 len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, 397 addr, val, val); 398 399 for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { 400 struct spi_transfer *xfer; 401 402 xfer = &rx_ring->uinc_xfer[j]; 403 xfer->tx_buf = &rx_ring->uinc_buf; 404 xfer->len = len; 405 xfer->cs_change = 1; 406 xfer->cs_change_delay.value = 0; 407 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 408 } 409 } 410 } 411 412 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) 413 { 414 int i; 415 416 for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { 417 kfree(priv->rx[i]); 418 priv->rx[i] = NULL; 419 } 420 } 421 422 static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) 423 { 424 struct mcp251xfd_tx_ring *tx_ring; 425 struct mcp251xfd_rx_ring *rx_ring; 426 int tef_obj_size, tx_obj_size, rx_obj_size; 427 int tx_obj_num; 428 int ram_free, i; 429 430 tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj); 431 /* listen-only mode works like FD mode */ 432 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) { 433 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD; 434 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); 435 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); 436 } else { 437 tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN; 438 tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); 439 rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); 440 } 441 442 tx_ring = priv->tx; 443 tx_ring->obj_num = tx_obj_num; 444 tx_ring->obj_size = tx_obj_size; 445 446 ram_free = MCP251XFD_RAM_SIZE - tx_obj_num * 447 (tef_obj_size + tx_obj_size); 448 449 for (i = 0; 450 i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size; 451 i++) { 452 int rx_obj_num; 453 454 rx_obj_num = ram_free / rx_obj_size; 455 rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 456 MCP251XFD_RX_OBJ_NUM_MAX); 457 458 rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, 459 GFP_KERNEL); 460 if (!rx_ring) { 461 mcp251xfd_ring_free(priv); 462 return -ENOMEM; 463 } 464 rx_ring->obj_num = rx_obj_num; 465 rx_ring->obj_size = rx_obj_size; 466 priv->rx[i] = rx_ring; 467 468 ram_free -= rx_ring->obj_num * rx_ring->obj_size; 469 } 470 priv->rx_ring_num = i; 471 472 netdev_dbg(priv->ndev, 473 "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n", 474 tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num, 475 tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num); 476 477 mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { 478 netdev_dbg(priv->ndev, 479 "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n", 480 i, rx_ring->obj_num, rx_ring->obj_size, 481 rx_ring->obj_size * rx_ring->obj_num); 482 } 483 484 netdev_dbg(priv->ndev, 485 "FIFO setup: free: %d bytes\n", 486 ram_free); 487 488 return 0; 489 } 490 491 static inline int 492 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) 493 { 494 u32 val; 495 int err; 496 497 err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val); 498 if (err) 499 return err; 500 501 *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val); 502 503 return 0; 504 } 505 506 static int 507 __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, 508 const u8 mode_req, bool nowait) 509 { 510 u32 con, con_reqop; 511 int err; 512 513 con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req); 514 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON, 515 MCP251XFD_REG_CON_REQOP_MASK, con_reqop); 516 if (err) 517 return err; 518 519 if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait) 520 return 0; 521 522 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con, 523 FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, 524 con) == mode_req, 525 MCP251XFD_POLL_SLEEP_US, 526 MCP251XFD_POLL_TIMEOUT_US); 527 if (err) { 528 u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); 529 530 netdev_err(priv->ndev, 531 "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n", 532 mcp251xfd_get_mode_str(mode_req), mode_req, 533 mcp251xfd_get_mode_str(mode), mode); 534 return err; 535 } 536 537 return 0; 538 } 539 540 static inline int 541 mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, 542 const u8 mode_req) 543 { 544 return __mcp251xfd_chip_set_mode(priv, mode_req, false); 545 } 546 547 static inline int 548 mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv, 549 const u8 mode_req) 550 { 551 return __mcp251xfd_chip_set_mode(priv, mode_req, true); 552 } 553 554 static inline bool mcp251xfd_osc_invalid(u32 reg) 555 { 556 return reg == 0x0 || reg == 0xffffffff; 557 } 558 559 static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv) 560 { 561 u32 osc, osc_reference, osc_mask; 562 int err; 563 564 /* Set Power On Defaults for "Clock Output Divisor" and remove 565 * "Oscillator Disable" bit. 566 */ 567 osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 568 MCP251XFD_REG_OSC_CLKODIV_10); 569 osc_reference = MCP251XFD_REG_OSC_OSCRDY; 570 osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; 571 572 /* Note: 573 * 574 * If the controller is in Sleep Mode the following write only 575 * removes the "Oscillator Disable" bit and powers it up. All 576 * other bits are unaffected. 577 */ 578 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); 579 if (err) 580 return err; 581 582 /* Wait for "Oscillator Ready" bit */ 583 err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, 584 (osc & osc_mask) == osc_reference, 585 MCP251XFD_OSC_STAB_SLEEP_US, 586 MCP251XFD_OSC_STAB_TIMEOUT_US); 587 if (mcp251xfd_osc_invalid(osc)) { 588 netdev_err(priv->ndev, 589 "Failed to detect %s (osc=0x%08x).\n", 590 mcp251xfd_get_model_str(priv), osc); 591 return -ENODEV; 592 } else if (err == -ETIMEDOUT) { 593 netdev_err(priv->ndev, 594 "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n", 595 osc, osc_reference); 596 return -ETIMEDOUT; 597 } else if (err) { 598 return err; 599 } 600 601 return 0; 602 } 603 604 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) 605 { 606 const __be16 cmd = mcp251xfd_cmd_reset(); 607 int err; 608 609 /* The Set Mode and SPI Reset command only seems to works if 610 * the controller is not in Sleep Mode. 611 */ 612 err = mcp251xfd_chip_clock_enable(priv); 613 if (err) 614 return err; 615 616 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); 617 if (err) 618 return err; 619 620 /* spi_write_then_read() works with non DMA-safe buffers */ 621 return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0); 622 } 623 624 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv) 625 { 626 u32 osc, osc_reference; 627 u8 mode; 628 int err; 629 630 err = mcp251xfd_chip_get_mode(priv, &mode); 631 if (err) 632 return err; 633 634 if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { 635 netdev_info(priv->ndev, 636 "Controller not in Config Mode after reset, but in %s Mode (%u).\n", 637 mcp251xfd_get_mode_str(mode), mode); 638 return -ETIMEDOUT; 639 } 640 641 osc_reference = MCP251XFD_REG_OSC_OSCRDY | 642 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 643 MCP251XFD_REG_OSC_CLKODIV_10); 644 645 /* check reset defaults of OSC reg */ 646 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); 647 if (err) 648 return err; 649 650 if (osc != osc_reference) { 651 netdev_info(priv->ndev, 652 "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n", 653 osc, osc_reference); 654 return -ETIMEDOUT; 655 } 656 657 return 0; 658 } 659 660 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) 661 { 662 int err, i; 663 664 for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) { 665 if (i) 666 netdev_info(priv->ndev, 667 "Retrying to reset Controller.\n"); 668 669 err = mcp251xfd_chip_softreset_do(priv); 670 if (err == -ETIMEDOUT) 671 continue; 672 if (err) 673 return err; 674 675 err = mcp251xfd_chip_softreset_check(priv); 676 if (err == -ETIMEDOUT) 677 continue; 678 if (err) 679 return err; 680 681 return 0; 682 } 683 684 return err; 685 } 686 687 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) 688 { 689 u32 osc; 690 int err; 691 692 /* Activate Low Power Mode on Oscillator Disable. This only 693 * works on the MCP2518FD. The MCP2517FD will go into normal 694 * Sleep Mode instead. 695 */ 696 osc = MCP251XFD_REG_OSC_LPMEN | 697 FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, 698 MCP251XFD_REG_OSC_CLKODIV_10); 699 err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); 700 if (err) 701 return err; 702 703 /* Set Time Base Counter Prescaler to 1. 704 * 705 * This means an overflow of the 32 bit Time Base Counter 706 * register at 40 MHz every 107 seconds. 707 */ 708 return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON, 709 MCP251XFD_REG_TSCON_TBCEN); 710 } 711 712 static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) 713 { 714 const struct can_bittiming *bt = &priv->can.bittiming; 715 const struct can_bittiming *dbt = &priv->can.data_bittiming; 716 u32 val = 0; 717 s8 tdco; 718 int err; 719 720 /* CAN Control Register 721 * 722 * - no transmit bandwidth sharing 723 * - config mode 724 * - disable transmit queue 725 * - store in transmit FIFO event 726 * - transition to restricted operation mode on system error 727 * - ESI is transmitted recessive when ESI of message is high or 728 * CAN controller error passive 729 * - restricted retransmission attempts, 730 * use TQXCON_TXAT and FIFOCON_TXAT 731 * - wake-up filter bits T11FILTER 732 * - use CAN bus line filter for wakeup 733 * - protocol exception is treated as a form error 734 * - Do not compare data bytes 735 */ 736 val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, 737 MCP251XFD_REG_CON_MODE_CONFIG) | 738 MCP251XFD_REG_CON_STEF | 739 MCP251XFD_REG_CON_ESIGM | 740 MCP251XFD_REG_CON_RTXAT | 741 FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, 742 MCP251XFD_REG_CON_WFT_T11FILTER) | 743 MCP251XFD_REG_CON_WAKFIL | 744 MCP251XFD_REG_CON_PXEDIS; 745 746 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) 747 val |= MCP251XFD_REG_CON_ISOCRCEN; 748 749 err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val); 750 if (err) 751 return err; 752 753 /* Nominal Bit Time */ 754 val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) | 755 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, 756 bt->prop_seg + bt->phase_seg1 - 1) | 757 FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, 758 bt->phase_seg2 - 1) | 759 FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1); 760 761 err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val); 762 if (err) 763 return err; 764 765 if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) 766 return 0; 767 768 /* Data Bit Time */ 769 val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) | 770 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK, 771 dbt->prop_seg + dbt->phase_seg1 - 1) | 772 FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, 773 dbt->phase_seg2 - 1) | 774 FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1); 775 776 err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val); 777 if (err) 778 return err; 779 780 /* Transmitter Delay Compensation */ 781 tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), 782 -64, 63); 783 val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, 784 MCP251XFD_REG_TDC_TDCMOD_AUTO) | 785 FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); 786 787 return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); 788 } 789 790 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv) 791 { 792 u32 val; 793 794 if (!priv->rx_int) 795 return 0; 796 797 /* Configure GPIOs: 798 * - PIN0: GPIO Input 799 * - PIN1: GPIO Input/RX Interrupt 800 * 801 * PIN1 must be Input, otherwise there is a glitch on the 802 * rx-INT line. It happens between setting the PIN as output 803 * (in the first byte of the SPI transfer) and configuring the 804 * PIN as interrupt (in the last byte of the SPI transfer). 805 */ 806 val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 | 807 MCP251XFD_REG_IOCON_TRIS0; 808 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); 809 } 810 811 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) 812 { 813 u32 val; 814 815 if (!priv->rx_int) 816 return 0; 817 818 /* Configure GPIOs: 819 * - PIN0: GPIO Input 820 * - PIN1: GPIO Input 821 */ 822 val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 | 823 MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0; 824 return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); 825 } 826 827 static int 828 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, 829 const struct mcp251xfd_rx_ring *ring) 830 { 831 u32 fifo_con; 832 833 /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. 834 * 835 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will 836 * generate a RXOVIF, use this to properly detect RX MAB 837 * overflows. 838 */ 839 fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, 840 ring->obj_num - 1) | 841 MCP251XFD_REG_FIFOCON_RXTSEN | 842 MCP251XFD_REG_FIFOCON_RXOVIE | 843 MCP251XFD_REG_FIFOCON_TFNRFNIE; 844 845 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) 846 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 847 MCP251XFD_REG_FIFOCON_PLSIZE_64); 848 else 849 fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 850 MCP251XFD_REG_FIFOCON_PLSIZE_8); 851 852 return regmap_write(priv->map_reg, 853 MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); 854 } 855 856 static int 857 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, 858 const struct mcp251xfd_rx_ring *ring) 859 { 860 u32 fltcon; 861 862 fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | 863 MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); 864 865 return regmap_update_bits(priv->map_reg, 866 MCP251XFD_REG_FLTCON(ring->nr >> 2), 867 MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), 868 fltcon); 869 } 870 871 static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) 872 { 873 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; 874 const struct mcp251xfd_rx_ring *rx_ring; 875 u32 val; 876 int err, n; 877 878 /* TEF */ 879 val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, 880 tx_ring->obj_num - 1) | 881 MCP251XFD_REG_TEFCON_TEFTSEN | 882 MCP251XFD_REG_TEFCON_TEFOVIE | 883 MCP251XFD_REG_TEFCON_TEFNEIE; 884 885 err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); 886 if (err) 887 return err; 888 889 /* FIFO 1 - TX */ 890 val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, 891 tx_ring->obj_num - 1) | 892 MCP251XFD_REG_FIFOCON_TXEN | 893 MCP251XFD_REG_FIFOCON_TXATIE; 894 895 if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) 896 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 897 MCP251XFD_REG_FIFOCON_PLSIZE_64); 898 else 899 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, 900 MCP251XFD_REG_FIFOCON_PLSIZE_8); 901 902 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 903 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, 904 MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); 905 else 906 val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, 907 MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); 908 909 err = regmap_write(priv->map_reg, 910 MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO), 911 val); 912 if (err) 913 return err; 914 915 /* RX FIFOs */ 916 mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { 917 err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); 918 if (err) 919 return err; 920 921 err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); 922 if (err) 923 return err; 924 } 925 926 return 0; 927 } 928 929 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) 930 { 931 struct mcp251xfd_ecc *ecc = &priv->ecc; 932 void *ram; 933 u32 val = 0; 934 int err; 935 936 ecc->ecc_stat = 0; 937 938 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC) 939 val = MCP251XFD_REG_ECCCON_ECCEN; 940 941 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, 942 MCP251XFD_REG_ECCCON_ECCEN, val); 943 if (err) 944 return err; 945 946 ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL); 947 if (!ram) 948 return -ENOMEM; 949 950 err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram, 951 MCP251XFD_RAM_SIZE); 952 kfree(ram); 953 954 return err; 955 } 956 957 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv) 958 { 959 struct mcp251xfd_ecc *ecc = &priv->ecc; 960 961 ecc->ecc_stat = 0; 962 } 963 964 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) 965 { 966 u8 mode; 967 968 969 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 970 mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; 971 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 972 mode = MCP251XFD_REG_CON_MODE_LISTENONLY; 973 else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 974 mode = MCP251XFD_REG_CON_MODE_MIXED; 975 else 976 mode = MCP251XFD_REG_CON_MODE_CAN2_0; 977 978 return mode; 979 } 980 981 static int 982 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv, 983 bool nowait) 984 { 985 u8 mode; 986 987 mode = mcp251xfd_get_normal_mode(priv); 988 989 return __mcp251xfd_chip_set_mode(priv, mode, nowait); 990 } 991 992 static inline int 993 mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv) 994 { 995 return __mcp251xfd_chip_set_normal_mode(priv, false); 996 } 997 998 static inline int 999 mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv) 1000 { 1001 return __mcp251xfd_chip_set_normal_mode(priv, true); 1002 } 1003 1004 static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv) 1005 { 1006 u32 val; 1007 int err; 1008 1009 val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE; 1010 err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val); 1011 if (err) 1012 return err; 1013 1014 val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; 1015 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val); 1016 if (err) 1017 return err; 1018 1019 val = MCP251XFD_REG_INT_CERRIE | 1020 MCP251XFD_REG_INT_SERRIE | 1021 MCP251XFD_REG_INT_RXOVIE | 1022 MCP251XFD_REG_INT_TXATIE | 1023 MCP251XFD_REG_INT_SPICRCIE | 1024 MCP251XFD_REG_INT_ECCIE | 1025 MCP251XFD_REG_INT_TEFIE | 1026 MCP251XFD_REG_INT_MODIE | 1027 MCP251XFD_REG_INT_RXIE; 1028 1029 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 1030 val |= MCP251XFD_REG_INT_IVMIE; 1031 1032 return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val); 1033 } 1034 1035 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv) 1036 { 1037 int err; 1038 u32 mask; 1039 1040 err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0); 1041 if (err) 1042 return err; 1043 1044 mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; 1045 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, 1046 mask, 0x0); 1047 if (err) 1048 return err; 1049 1050 return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0); 1051 } 1052 1053 static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, 1054 const enum can_state state) 1055 { 1056 priv->can.state = state; 1057 1058 mcp251xfd_chip_interrupts_disable(priv); 1059 mcp251xfd_chip_rx_int_disable(priv); 1060 return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 1061 } 1062 1063 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) 1064 { 1065 int err; 1066 1067 err = mcp251xfd_chip_softreset(priv); 1068 if (err) 1069 goto out_chip_stop; 1070 1071 err = mcp251xfd_chip_clock_init(priv); 1072 if (err) 1073 goto out_chip_stop; 1074 1075 err = mcp251xfd_set_bittiming(priv); 1076 if (err) 1077 goto out_chip_stop; 1078 1079 err = mcp251xfd_chip_rx_int_enable(priv); 1080 if (err) 1081 return err; 1082 1083 err = mcp251xfd_chip_ecc_init(priv); 1084 if (err) 1085 goto out_chip_stop; 1086 1087 mcp251xfd_ring_init(priv); 1088 1089 err = mcp251xfd_chip_fifo_init(priv); 1090 if (err) 1091 goto out_chip_stop; 1092 1093 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1094 1095 err = mcp251xfd_chip_set_normal_mode(priv); 1096 if (err) 1097 goto out_chip_stop; 1098 1099 return 0; 1100 1101 out_chip_stop: 1102 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 1103 1104 return err; 1105 } 1106 1107 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode) 1108 { 1109 struct mcp251xfd_priv *priv = netdev_priv(ndev); 1110 int err; 1111 1112 switch (mode) { 1113 case CAN_MODE_START: 1114 err = mcp251xfd_chip_start(priv); 1115 if (err) 1116 return err; 1117 1118 err = mcp251xfd_chip_interrupts_enable(priv); 1119 if (err) { 1120 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 1121 return err; 1122 } 1123 1124 netif_wake_queue(ndev); 1125 break; 1126 1127 default: 1128 return -EOPNOTSUPP; 1129 } 1130 1131 return 0; 1132 } 1133 1134 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev, 1135 struct can_berr_counter *bec) 1136 { 1137 const struct mcp251xfd_priv *priv = netdev_priv(ndev); 1138 u32 trec; 1139 int err; 1140 1141 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); 1142 if (err) 1143 return err; 1144 1145 if (trec & MCP251XFD_REG_TREC_TXBO) 1146 bec->txerr = 256; 1147 else 1148 bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec); 1149 bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec); 1150 1151 return 0; 1152 } 1153 1154 static int mcp251xfd_get_berr_counter(const struct net_device *ndev, 1155 struct can_berr_counter *bec) 1156 { 1157 const struct mcp251xfd_priv *priv = netdev_priv(ndev); 1158 1159 /* Avoid waking up the controller if the interface is down */ 1160 if (!(ndev->flags & IFF_UP)) 1161 return 0; 1162 1163 /* The controller is powered down during Bus Off, use saved 1164 * bec values. 1165 */ 1166 if (priv->can.state == CAN_STATE_BUS_OFF) { 1167 *bec = priv->bec; 1168 return 0; 1169 } 1170 1171 return __mcp251xfd_get_berr_counter(ndev, bec); 1172 } 1173 1174 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) 1175 { 1176 u8 tef_tail_chip, tef_tail; 1177 int err; 1178 1179 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) 1180 return 0; 1181 1182 err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip); 1183 if (err) 1184 return err; 1185 1186 tef_tail = mcp251xfd_get_tef_tail(priv); 1187 if (tef_tail_chip != tef_tail) { 1188 netdev_err(priv->ndev, 1189 "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n", 1190 tef_tail_chip, tef_tail); 1191 return -EILSEQ; 1192 } 1193 1194 return 0; 1195 } 1196 1197 static int 1198 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv, 1199 const struct mcp251xfd_rx_ring *ring) 1200 { 1201 u8 rx_tail_chip, rx_tail; 1202 int err; 1203 1204 if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) 1205 return 0; 1206 1207 err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip); 1208 if (err) 1209 return err; 1210 1211 rx_tail = mcp251xfd_get_rx_tail(ring); 1212 if (rx_tail_chip != rx_tail) { 1213 netdev_err(priv->ndev, 1214 "RX tail of chip (%d) and ours (%d) inconsistent.\n", 1215 rx_tail_chip, rx_tail); 1216 return -EILSEQ; 1217 } 1218 1219 return 0; 1220 } 1221 1222 static int 1223 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) 1224 { 1225 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1226 u32 tef_sta; 1227 int err; 1228 1229 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); 1230 if (err) 1231 return err; 1232 1233 if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { 1234 netdev_err(priv->ndev, 1235 "Transmit Event FIFO buffer overflow.\n"); 1236 return -ENOBUFS; 1237 } 1238 1239 netdev_info(priv->ndev, 1240 "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n", 1241 tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? 1242 "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? 1243 "not empty" : "empty", 1244 seq, priv->tef->tail, priv->tef->head, tx_ring->head); 1245 1246 /* The Sequence Number in the TEF doesn't match our tef_tail. */ 1247 return -EAGAIN; 1248 } 1249 1250 static int 1251 mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, 1252 const struct mcp251xfd_hw_tef_obj *hw_tef_obj) 1253 { 1254 struct net_device_stats *stats = &priv->ndev->stats; 1255 u32 seq, seq_masked, tef_tail_masked; 1256 1257 seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, 1258 hw_tef_obj->flags); 1259 1260 /* Use the MCP2517FD mask on the MCP2518FD, too. We only 1261 * compare 7 bits, this should be enough to detect 1262 * net-yet-completed, i.e. old TEF objects. 1263 */ 1264 seq_masked = seq & 1265 field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); 1266 tef_tail_masked = priv->tef->tail & 1267 field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); 1268 if (seq_masked != tef_tail_masked) 1269 return mcp251xfd_handle_tefif_recover(priv, seq); 1270 1271 stats->tx_bytes += 1272 can_rx_offload_get_echo_skb(&priv->offload, 1273 mcp251xfd_get_tef_tail(priv), 1274 hw_tef_obj->ts); 1275 stats->tx_packets++; 1276 priv->tef->tail++; 1277 1278 return 0; 1279 } 1280 1281 static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) 1282 { 1283 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1284 unsigned int new_head; 1285 u8 chip_tx_tail; 1286 int err; 1287 1288 err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); 1289 if (err) 1290 return err; 1291 1292 /* chip_tx_tail, is the next TX-Object send by the HW. 1293 * The new TEF head must be >= the old head, ... 1294 */ 1295 new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; 1296 if (new_head <= priv->tef->head) 1297 new_head += tx_ring->obj_num; 1298 1299 /* ... but it cannot exceed the TX head. */ 1300 priv->tef->head = min(new_head, tx_ring->head); 1301 1302 return mcp251xfd_check_tef_tail(priv); 1303 } 1304 1305 static inline int 1306 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, 1307 struct mcp251xfd_hw_tef_obj *hw_tef_obj, 1308 const u8 offset, const u8 len) 1309 { 1310 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1311 1312 if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && 1313 (offset > tx_ring->obj_num || 1314 len > tx_ring->obj_num || 1315 offset + len > tx_ring->obj_num)) { 1316 netdev_err(priv->ndev, 1317 "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n", 1318 tx_ring->obj_num, offset, len); 1319 return -ERANGE; 1320 } 1321 1322 return regmap_bulk_read(priv->map_rx, 1323 mcp251xfd_get_tef_obj_addr(offset), 1324 hw_tef_obj, 1325 sizeof(*hw_tef_obj) / sizeof(u32) * len); 1326 } 1327 1328 static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) 1329 { 1330 struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX]; 1331 u8 tef_tail, len, l; 1332 int err, i; 1333 1334 err = mcp251xfd_tef_ring_update(priv); 1335 if (err) 1336 return err; 1337 1338 tef_tail = mcp251xfd_get_tef_tail(priv); 1339 len = mcp251xfd_get_tef_len(priv); 1340 l = mcp251xfd_get_tef_linear_len(priv); 1341 err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); 1342 if (err) 1343 return err; 1344 1345 if (l < len) { 1346 err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l); 1347 if (err) 1348 return err; 1349 } 1350 1351 for (i = 0; i < len; i++) { 1352 err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]); 1353 /* -EAGAIN means the Sequence Number in the TEF 1354 * doesn't match our tef_tail. This can happen if we 1355 * read the TEF objects too early. Leave loop let the 1356 * interrupt handler call us again. 1357 */ 1358 if (err == -EAGAIN) 1359 goto out_netif_wake_queue; 1360 if (err) 1361 return err; 1362 } 1363 1364 out_netif_wake_queue: 1365 len = i; /* number of handled goods TEFs */ 1366 if (len) { 1367 struct mcp251xfd_tef_ring *ring = priv->tef; 1368 struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1369 struct spi_transfer *last_xfer; 1370 1371 /* Increment the TEF FIFO tail pointer 'len' times in 1372 * a single SPI message. 1373 * 1374 * Note: 1375 * 1376 * "cs_change == 1" on the last transfer results in an 1377 * active chip select after the complete SPI 1378 * message. This causes the controller to interpret 1379 * the next register access as data. Temporary set 1380 * "cs_change" of the last transfer to "0" to properly 1381 * deactivate the chip select at the end of the 1382 * message. 1383 */ 1384 last_xfer = &ring->uinc_xfer[len - 1]; 1385 last_xfer->cs_change = 0; 1386 err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); 1387 last_xfer->cs_change = 1; 1388 if (err) 1389 return err; 1390 1391 tx_ring->tail += len; 1392 1393 err = mcp251xfd_check_tef_tail(priv); 1394 if (err) 1395 return err; 1396 } 1397 1398 mcp251xfd_ecc_tefif_successful(priv); 1399 1400 if (mcp251xfd_get_tx_free(priv->tx)) { 1401 /* Make sure that anybody stopping the queue after 1402 * this sees the new tx_ring->tail. 1403 */ 1404 smp_mb(); 1405 netif_wake_queue(priv->ndev); 1406 } 1407 1408 return 0; 1409 } 1410 1411 static int 1412 mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, 1413 struct mcp251xfd_rx_ring *ring) 1414 { 1415 u32 new_head; 1416 u8 chip_rx_head; 1417 int err; 1418 1419 err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head); 1420 if (err) 1421 return err; 1422 1423 /* chip_rx_head, is the next RX-Object filled by the HW. 1424 * The new RX head must be >= the old head. 1425 */ 1426 new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; 1427 if (new_head <= ring->head) 1428 new_head += ring->obj_num; 1429 1430 ring->head = new_head; 1431 1432 return mcp251xfd_check_rx_tail(priv, ring); 1433 } 1434 1435 static void 1436 mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, 1437 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, 1438 struct sk_buff *skb) 1439 { 1440 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 1441 1442 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) { 1443 u32 sid, eid; 1444 1445 eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id); 1446 sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id); 1447 1448 cfd->can_id = CAN_EFF_FLAG | 1449 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) | 1450 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid); 1451 } else { 1452 cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, 1453 hw_rx_obj->id); 1454 } 1455 1456 /* CANFD */ 1457 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) { 1458 u8 dlc; 1459 1460 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI) 1461 cfd->flags |= CANFD_ESI; 1462 1463 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS) 1464 cfd->flags |= CANFD_BRS; 1465 1466 dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags); 1467 cfd->len = can_fd_dlc2len(dlc); 1468 } else { 1469 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR) 1470 cfd->can_id |= CAN_RTR_FLAG; 1471 1472 cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, 1473 hw_rx_obj->flags)); 1474 } 1475 1476 memcpy(cfd->data, hw_rx_obj->data, cfd->len); 1477 } 1478 1479 static int 1480 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, 1481 struct mcp251xfd_rx_ring *ring, 1482 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj) 1483 { 1484 struct net_device_stats *stats = &priv->ndev->stats; 1485 struct sk_buff *skb; 1486 struct canfd_frame *cfd; 1487 int err; 1488 1489 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) 1490 skb = alloc_canfd_skb(priv->ndev, &cfd); 1491 else 1492 skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd); 1493 1494 if (!skb) { 1495 stats->rx_dropped++; 1496 return 0; 1497 } 1498 1499 mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); 1500 err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts); 1501 if (err) 1502 stats->rx_fifo_errors++; 1503 1504 return 0; 1505 } 1506 1507 static inline int 1508 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, 1509 const struct mcp251xfd_rx_ring *ring, 1510 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, 1511 const u8 offset, const u8 len) 1512 { 1513 int err; 1514 1515 err = regmap_bulk_read(priv->map_rx, 1516 mcp251xfd_get_rx_obj_addr(ring, offset), 1517 hw_rx_obj, 1518 len * ring->obj_size / sizeof(u32)); 1519 1520 return err; 1521 } 1522 1523 static int 1524 mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, 1525 struct mcp251xfd_rx_ring *ring) 1526 { 1527 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; 1528 u8 rx_tail, len; 1529 int err, i; 1530 1531 err = mcp251xfd_rx_ring_update(priv, ring); 1532 if (err) 1533 return err; 1534 1535 while ((len = mcp251xfd_get_rx_linear_len(ring))) { 1536 struct spi_transfer *last_xfer; 1537 1538 rx_tail = mcp251xfd_get_rx_tail(ring); 1539 1540 err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, 1541 rx_tail, len); 1542 if (err) 1543 return err; 1544 1545 for (i = 0; i < len; i++) { 1546 err = mcp251xfd_handle_rxif_one(priv, ring, 1547 (void *)hw_rx_obj + 1548 i * ring->obj_size); 1549 if (err) 1550 return err; 1551 } 1552 1553 /* Increment the RX FIFO tail pointer 'len' times in a 1554 * single SPI message. 1555 * 1556 * Note: 1557 * 1558 * "cs_change == 1" on the last transfer results in an 1559 * active chip select after the complete SPI 1560 * message. This causes the controller to interpret 1561 * the next register access as data. Temporary set 1562 * "cs_change" of the last transfer to "0" to properly 1563 * deactivate the chip select at the end of the 1564 * message. 1565 */ 1566 last_xfer = &ring->uinc_xfer[len - 1]; 1567 last_xfer->cs_change = 0; 1568 err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); 1569 last_xfer->cs_change = 1; 1570 if (err) 1571 return err; 1572 1573 ring->tail += len; 1574 } 1575 1576 return 0; 1577 } 1578 1579 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv) 1580 { 1581 struct mcp251xfd_rx_ring *ring; 1582 int err, n; 1583 1584 mcp251xfd_for_each_rx_ring(priv, ring, n) { 1585 err = mcp251xfd_handle_rxif_ring(priv, ring); 1586 if (err) 1587 return err; 1588 } 1589 1590 return 0; 1591 } 1592 1593 static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, 1594 u32 *timestamp) 1595 { 1596 return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); 1597 } 1598 1599 static struct sk_buff * 1600 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv, 1601 struct can_frame **cf, u32 *timestamp) 1602 { 1603 int err; 1604 1605 err = mcp251xfd_get_timestamp(priv, timestamp); 1606 if (err) 1607 return NULL; 1608 1609 return alloc_can_err_skb(priv->ndev, cf); 1610 } 1611 1612 static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) 1613 { 1614 struct net_device_stats *stats = &priv->ndev->stats; 1615 struct mcp251xfd_rx_ring *ring; 1616 struct sk_buff *skb; 1617 struct can_frame *cf; 1618 u32 timestamp, rxovif; 1619 int err, i; 1620 1621 stats->rx_over_errors++; 1622 stats->rx_errors++; 1623 1624 err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif); 1625 if (err) 1626 return err; 1627 1628 mcp251xfd_for_each_rx_ring(priv, ring, i) { 1629 if (!(rxovif & BIT(ring->fifo_nr))) 1630 continue; 1631 1632 /* If SERRIF is active, there was a RX MAB overflow. */ 1633 if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) { 1634 netdev_info(priv->ndev, 1635 "RX-%d: MAB overflow detected.\n", 1636 ring->nr); 1637 } else { 1638 netdev_info(priv->ndev, 1639 "RX-%d: FIFO overflow.\n", ring->nr); 1640 } 1641 1642 err = regmap_update_bits(priv->map_reg, 1643 MCP251XFD_REG_FIFOSTA(ring->fifo_nr), 1644 MCP251XFD_REG_FIFOSTA_RXOVIF, 1645 0x0); 1646 if (err) 1647 return err; 1648 } 1649 1650 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp); 1651 if (!skb) 1652 return 0; 1653 1654 cf->can_id |= CAN_ERR_CRTL; 1655 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 1656 1657 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 1658 if (err) 1659 stats->rx_fifo_errors++; 1660 1661 return 0; 1662 } 1663 1664 static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) 1665 { 1666 netdev_info(priv->ndev, "%s\n", __func__); 1667 1668 return 0; 1669 } 1670 1671 static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) 1672 { 1673 struct net_device_stats *stats = &priv->ndev->stats; 1674 u32 bdiag1, timestamp; 1675 struct sk_buff *skb; 1676 struct can_frame *cf = NULL; 1677 int err; 1678 1679 err = mcp251xfd_get_timestamp(priv, ×tamp); 1680 if (err) 1681 return err; 1682 1683 err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1); 1684 if (err) 1685 return err; 1686 1687 /* Write 0s to clear error bits, don't write 1s to non active 1688 * bits, as they will be set. 1689 */ 1690 err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0); 1691 if (err) 1692 return err; 1693 1694 priv->can.can_stats.bus_error++; 1695 1696 skb = alloc_can_err_skb(priv->ndev, &cf); 1697 if (cf) 1698 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 1699 1700 /* Controller misconfiguration */ 1701 if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM)) 1702 netdev_err(priv->ndev, 1703 "recv'd DLC is larger than PLSIZE of FIFO element."); 1704 1705 /* RX errors */ 1706 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR | 1707 MCP251XFD_REG_BDIAG1_NCRCERR)) { 1708 netdev_dbg(priv->ndev, "CRC error\n"); 1709 1710 stats->rx_errors++; 1711 if (cf) 1712 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 1713 } 1714 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR | 1715 MCP251XFD_REG_BDIAG1_NSTUFERR)) { 1716 netdev_dbg(priv->ndev, "Stuff error\n"); 1717 1718 stats->rx_errors++; 1719 if (cf) 1720 cf->data[2] |= CAN_ERR_PROT_STUFF; 1721 } 1722 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR | 1723 MCP251XFD_REG_BDIAG1_NFORMERR)) { 1724 netdev_dbg(priv->ndev, "Format error\n"); 1725 1726 stats->rx_errors++; 1727 if (cf) 1728 cf->data[2] |= CAN_ERR_PROT_FORM; 1729 } 1730 1731 /* TX errors */ 1732 if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) { 1733 netdev_dbg(priv->ndev, "NACK error\n"); 1734 1735 stats->tx_errors++; 1736 if (cf) { 1737 cf->can_id |= CAN_ERR_ACK; 1738 cf->data[2] |= CAN_ERR_PROT_TX; 1739 } 1740 } 1741 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR | 1742 MCP251XFD_REG_BDIAG1_NBIT1ERR)) { 1743 netdev_dbg(priv->ndev, "Bit1 error\n"); 1744 1745 stats->tx_errors++; 1746 if (cf) 1747 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1; 1748 } 1749 if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR | 1750 MCP251XFD_REG_BDIAG1_NBIT0ERR)) { 1751 netdev_dbg(priv->ndev, "Bit0 error\n"); 1752 1753 stats->tx_errors++; 1754 if (cf) 1755 cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0; 1756 } 1757 1758 if (!cf) 1759 return 0; 1760 1761 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 1762 if (err) 1763 stats->rx_fifo_errors++; 1764 1765 return 0; 1766 } 1767 1768 static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) 1769 { 1770 struct net_device_stats *stats = &priv->ndev->stats; 1771 struct sk_buff *skb; 1772 struct can_frame *cf = NULL; 1773 enum can_state new_state, rx_state, tx_state; 1774 u32 trec, timestamp; 1775 int err; 1776 1777 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); 1778 if (err) 1779 return err; 1780 1781 if (trec & MCP251XFD_REG_TREC_TXBO) 1782 tx_state = CAN_STATE_BUS_OFF; 1783 else if (trec & MCP251XFD_REG_TREC_TXBP) 1784 tx_state = CAN_STATE_ERROR_PASSIVE; 1785 else if (trec & MCP251XFD_REG_TREC_TXWARN) 1786 tx_state = CAN_STATE_ERROR_WARNING; 1787 else 1788 tx_state = CAN_STATE_ERROR_ACTIVE; 1789 1790 if (trec & MCP251XFD_REG_TREC_RXBP) 1791 rx_state = CAN_STATE_ERROR_PASSIVE; 1792 else if (trec & MCP251XFD_REG_TREC_RXWARN) 1793 rx_state = CAN_STATE_ERROR_WARNING; 1794 else 1795 rx_state = CAN_STATE_ERROR_ACTIVE; 1796 1797 new_state = max(tx_state, rx_state); 1798 if (new_state == priv->can.state) 1799 return 0; 1800 1801 /* The skb allocation might fail, but can_change_state() 1802 * handles cf == NULL. 1803 */ 1804 skb = mcp251xfd_alloc_can_err_skb(priv, &cf, ×tamp); 1805 can_change_state(priv->ndev, cf, tx_state, rx_state); 1806 1807 if (new_state == CAN_STATE_BUS_OFF) { 1808 /* As we're going to switch off the chip now, let's 1809 * save the error counters and return them to 1810 * userspace, if do_get_berr_counter() is called while 1811 * the chip is in Bus Off. 1812 */ 1813 err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec); 1814 if (err) 1815 return err; 1816 1817 mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF); 1818 can_bus_off(priv->ndev); 1819 } 1820 1821 if (!skb) 1822 return 0; 1823 1824 if (new_state != CAN_STATE_BUS_OFF) { 1825 struct can_berr_counter bec; 1826 1827 err = mcp251xfd_get_berr_counter(priv->ndev, &bec); 1828 if (err) 1829 return err; 1830 cf->data[6] = bec.txerr; 1831 cf->data[7] = bec.rxerr; 1832 } 1833 1834 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 1835 if (err) 1836 stats->rx_fifo_errors++; 1837 1838 return 0; 1839 } 1840 1841 static int 1842 mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode) 1843 { 1844 const u8 mode_reference = mcp251xfd_get_normal_mode(priv); 1845 u8 mode; 1846 int err; 1847 1848 err = mcp251xfd_chip_get_mode(priv, &mode); 1849 if (err) 1850 return err; 1851 1852 if (mode == mode_reference) { 1853 netdev_dbg(priv->ndev, 1854 "Controller changed into %s Mode (%u).\n", 1855 mcp251xfd_get_mode_str(mode), mode); 1856 return 0; 1857 } 1858 1859 /* According to MCP2517FD errata DS80000792B 1., during a TX 1860 * MAB underflow, the controller will transition to Restricted 1861 * Operation Mode or Listen Only Mode (depending on SERR2LOM). 1862 * 1863 * However this is not always the case. If SERR2LOM is 1864 * configured for Restricted Operation Mode (SERR2LOM not set) 1865 * the MCP2517FD will sometimes transition to Listen Only Mode 1866 * first. When polling this bit we see that it will transition 1867 * to Restricted Operation Mode shortly after. 1868 */ 1869 if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) && 1870 (mode == MCP251XFD_REG_CON_MODE_RESTRICTED || 1871 mode == MCP251XFD_REG_CON_MODE_LISTENONLY)) 1872 netdev_dbg(priv->ndev, 1873 "Controller changed into %s Mode (%u).\n", 1874 mcp251xfd_get_mode_str(mode), mode); 1875 else 1876 netdev_err(priv->ndev, 1877 "Controller changed into %s Mode (%u).\n", 1878 mcp251xfd_get_mode_str(mode), mode); 1879 1880 /* After the application requests Normal mode, the Controller 1881 * will automatically attempt to retransmit the message that 1882 * caused the TX MAB underflow. 1883 * 1884 * However, if there is an ECC error in the TX-RAM, we first 1885 * have to reload the tx-object before requesting Normal 1886 * mode. This is done later in mcp251xfd_handle_eccif(). 1887 */ 1888 if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) { 1889 *set_normal_mode = true; 1890 return 0; 1891 } 1892 1893 return mcp251xfd_chip_set_normal_mode_nowait(priv); 1894 } 1895 1896 static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv) 1897 { 1898 struct mcp251xfd_ecc *ecc = &priv->ecc; 1899 struct net_device_stats *stats = &priv->ndev->stats; 1900 bool handled = false; 1901 1902 /* TX MAB underflow 1903 * 1904 * According to MCP2517FD Errata DS80000792B 1. a TX MAB 1905 * underflow is indicated by SERRIF and MODIF. 1906 * 1907 * In addition to the effects mentioned in the Errata, there 1908 * are Bus Errors due to the aborted CAN frame, so a IVMIF 1909 * will be seen as well. 1910 * 1911 * Sometimes there is an ECC error in the TX-RAM, which leads 1912 * to a TX MAB underflow. 1913 * 1914 * However, probably due to a race condition, there is no 1915 * associated MODIF pending. 1916 * 1917 * Further, there are situations, where the SERRIF is caused 1918 * by an ECC error in the TX-RAM, but not even the ECCIF is 1919 * set. This only seems to happen _after_ the first occurrence 1920 * of a ECCIF (which is tracked in ecc->cnt). 1921 * 1922 * Treat all as a known system errors.. 1923 */ 1924 if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF && 1925 priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) || 1926 priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || 1927 ecc->cnt) { 1928 const char *msg; 1929 1930 if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || 1931 ecc->cnt) 1932 msg = "TX MAB underflow due to ECC error detected."; 1933 else 1934 msg = "TX MAB underflow detected."; 1935 1936 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) 1937 netdev_dbg(priv->ndev, "%s\n", msg); 1938 else 1939 netdev_info(priv->ndev, "%s\n", msg); 1940 1941 stats->tx_aborted_errors++; 1942 stats->tx_errors++; 1943 handled = true; 1944 } 1945 1946 /* RX MAB overflow 1947 * 1948 * According to MCP2517FD Errata DS80000792B 1. a RX MAB 1949 * overflow is indicated by SERRIF. 1950 * 1951 * In addition to the effects mentioned in the Errata, (most 1952 * of the times) a RXOVIF is raised, if the FIFO that is being 1953 * received into has the RXOVIE activated (and we have enabled 1954 * RXOVIE on all FIFOs). 1955 * 1956 * Sometimes there is no RXOVIF just a RXIF is pending. 1957 * 1958 * Treat all as a known system errors.. 1959 */ 1960 if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF || 1961 priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) { 1962 stats->rx_dropped++; 1963 handled = true; 1964 } 1965 1966 if (!handled) 1967 netdev_err(priv->ndev, 1968 "Unhandled System Error Interrupt (intf=0x%08x)!\n", 1969 priv->regs_status.intf); 1970 1971 return 0; 1972 } 1973 1974 static int 1975 mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr) 1976 { 1977 struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1978 struct mcp251xfd_ecc *ecc = &priv->ecc; 1979 struct mcp251xfd_tx_obj *tx_obj; 1980 u8 chip_tx_tail, tx_tail, offset; 1981 u16 addr; 1982 int err; 1983 1984 addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat); 1985 1986 err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); 1987 if (err) 1988 return err; 1989 1990 tx_tail = mcp251xfd_get_tx_tail(tx_ring); 1991 offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1); 1992 1993 /* Bail out if one of the following is met: 1994 * - tx_tail information is inconsistent 1995 * - for mcp2517fd: offset not 0 1996 * - for mcp2518fd: offset not 0 or 1 1997 */ 1998 if (chip_tx_tail != tx_tail || 1999 !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) { 2000 netdev_err(priv->ndev, 2001 "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n", 2002 addr, nr, tx_ring->tail, tx_tail, chip_tx_tail, 2003 offset); 2004 return -EINVAL; 2005 } 2006 2007 netdev_info(priv->ndev, 2008 "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n", 2009 ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ? 2010 "Single" : "Double", 2011 addr, nr, tx_ring->tail, tx_tail, offset); 2012 2013 /* reload tx_obj into controller RAM ... */ 2014 tx_obj = &tx_ring->obj[nr]; 2015 err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1); 2016 if (err) 2017 return err; 2018 2019 /* ... and trigger retransmit */ 2020 return mcp251xfd_chip_set_normal_mode(priv); 2021 } 2022 2023 static int 2024 mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode) 2025 { 2026 struct mcp251xfd_ecc *ecc = &priv->ecc; 2027 const char *msg; 2028 bool in_tx_ram; 2029 u32 ecc_stat; 2030 u16 addr; 2031 u8 nr; 2032 int err; 2033 2034 err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat); 2035 if (err) 2036 return err; 2037 2038 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT, 2039 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat); 2040 if (err) 2041 return err; 2042 2043 /* Check if ECC error occurred in TX-RAM */ 2044 addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat); 2045 err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr); 2046 if (!err) 2047 in_tx_ram = true; 2048 else if (err == -ENOENT) 2049 in_tx_ram = false; 2050 else 2051 return err; 2052 2053 /* Errata Reference: 2054 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2. 2055 * 2056 * ECC single error correction does not work in all cases: 2057 * 2058 * Fix/Work Around: 2059 * Enable single error correction and double error detection 2060 * interrupts by setting SECIE and DEDIE. Handle SECIF as a 2061 * detection interrupt and do not rely on the error 2062 * correction. Instead, handle both interrupts as a 2063 * notification that the RAM word at ERRADDR was corrupted. 2064 */ 2065 if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF) 2066 msg = "Single ECC Error detected at address"; 2067 else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF) 2068 msg = "Double ECC Error detected at address"; 2069 else 2070 return -EINVAL; 2071 2072 if (!in_tx_ram) { 2073 ecc->ecc_stat = 0; 2074 2075 netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr); 2076 } else { 2077 /* Re-occurring error? */ 2078 if (ecc->ecc_stat == ecc_stat) { 2079 ecc->cnt++; 2080 } else { 2081 ecc->ecc_stat = ecc_stat; 2082 ecc->cnt = 1; 2083 } 2084 2085 netdev_info(priv->ndev, 2086 "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n", 2087 msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : ""); 2088 2089 if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX) 2090 return mcp251xfd_handle_eccif_recover(priv, nr); 2091 } 2092 2093 if (set_normal_mode) 2094 return mcp251xfd_chip_set_normal_mode_nowait(priv); 2095 2096 return 0; 2097 } 2098 2099 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv) 2100 { 2101 int err; 2102 u32 crc; 2103 2104 err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc); 2105 if (err) 2106 return err; 2107 2108 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC, 2109 MCP251XFD_REG_CRC_IF_MASK, 2110 ~crc); 2111 if (err) 2112 return err; 2113 2114 if (crc & MCP251XFD_REG_CRC_FERRIF) 2115 netdev_notice(priv->ndev, "CRC write command format error.\n"); 2116 else if (crc & MCP251XFD_REG_CRC_CRCERRIF) 2117 netdev_notice(priv->ndev, 2118 "CRC write error detected. CRC=0x%04lx.\n", 2119 FIELD_GET(MCP251XFD_REG_CRC_MASK, crc)); 2120 2121 return 0; 2122 } 2123 2124 #define mcp251xfd_handle(priv, irq, ...) \ 2125 ({ \ 2126 struct mcp251xfd_priv *_priv = (priv); \ 2127 int err; \ 2128 \ 2129 err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \ 2130 if (err) \ 2131 netdev_err(_priv->ndev, \ 2132 "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \ 2133 __stringify(irq), err); \ 2134 err; \ 2135 }) 2136 2137 static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) 2138 { 2139 struct mcp251xfd_priv *priv = dev_id; 2140 irqreturn_t handled = IRQ_NONE; 2141 int err; 2142 2143 if (priv->rx_int) 2144 do { 2145 int rx_pending; 2146 2147 rx_pending = gpiod_get_value_cansleep(priv->rx_int); 2148 if (!rx_pending) 2149 break; 2150 2151 err = mcp251xfd_handle(priv, rxif); 2152 if (err) 2153 goto out_fail; 2154 2155 handled = IRQ_HANDLED; 2156 } while (1); 2157 2158 do { 2159 u32 intf_pending, intf_pending_clearable; 2160 bool set_normal_mode = false; 2161 2162 err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT, 2163 &priv->regs_status, 2164 sizeof(priv->regs_status) / 2165 sizeof(u32)); 2166 if (err) 2167 goto out_fail; 2168 2169 intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK, 2170 priv->regs_status.intf) & 2171 FIELD_GET(MCP251XFD_REG_INT_IE_MASK, 2172 priv->regs_status.intf); 2173 2174 if (!(intf_pending)) 2175 return handled; 2176 2177 /* Some interrupts must be ACKed in the 2178 * MCP251XFD_REG_INT register. 2179 * - First ACK then handle, to avoid lost-IRQ race 2180 * condition on fast re-occurring interrupts. 2181 * - Write "0" to clear active IRQs, "1" to all other, 2182 * to avoid r/m/w race condition on the 2183 * MCP251XFD_REG_INT register. 2184 */ 2185 intf_pending_clearable = intf_pending & 2186 MCP251XFD_REG_INT_IF_CLEARABLE_MASK; 2187 if (intf_pending_clearable) { 2188 err = regmap_update_bits(priv->map_reg, 2189 MCP251XFD_REG_INT, 2190 MCP251XFD_REG_INT_IF_MASK, 2191 ~intf_pending_clearable); 2192 if (err) 2193 goto out_fail; 2194 } 2195 2196 if (intf_pending & MCP251XFD_REG_INT_MODIF) { 2197 err = mcp251xfd_handle(priv, modif, &set_normal_mode); 2198 if (err) 2199 goto out_fail; 2200 } 2201 2202 if (intf_pending & MCP251XFD_REG_INT_RXIF) { 2203 err = mcp251xfd_handle(priv, rxif); 2204 if (err) 2205 goto out_fail; 2206 } 2207 2208 if (intf_pending & MCP251XFD_REG_INT_TEFIF) { 2209 err = mcp251xfd_handle(priv, tefif); 2210 if (err) 2211 goto out_fail; 2212 } 2213 2214 if (intf_pending & MCP251XFD_REG_INT_RXOVIF) { 2215 err = mcp251xfd_handle(priv, rxovif); 2216 if (err) 2217 goto out_fail; 2218 } 2219 2220 if (intf_pending & MCP251XFD_REG_INT_TXATIF) { 2221 err = mcp251xfd_handle(priv, txatif); 2222 if (err) 2223 goto out_fail; 2224 } 2225 2226 if (intf_pending & MCP251XFD_REG_INT_IVMIF) { 2227 err = mcp251xfd_handle(priv, ivmif); 2228 if (err) 2229 goto out_fail; 2230 } 2231 2232 if (intf_pending & MCP251XFD_REG_INT_SERRIF) { 2233 err = mcp251xfd_handle(priv, serrif); 2234 if (err) 2235 goto out_fail; 2236 } 2237 2238 if (intf_pending & MCP251XFD_REG_INT_ECCIF) { 2239 err = mcp251xfd_handle(priv, eccif, set_normal_mode); 2240 if (err) 2241 goto out_fail; 2242 } 2243 2244 if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) { 2245 err = mcp251xfd_handle(priv, spicrcif); 2246 if (err) 2247 goto out_fail; 2248 } 2249 2250 /* On the MCP2527FD and MCP2518FD, we don't get a 2251 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX 2252 * ERROR_ACTIVE. 2253 */ 2254 if (intf_pending & MCP251XFD_REG_INT_CERRIF || 2255 priv->can.state > CAN_STATE_ERROR_ACTIVE) { 2256 err = mcp251xfd_handle(priv, cerrif); 2257 if (err) 2258 goto out_fail; 2259 2260 /* In Bus Off we completely shut down the 2261 * controller. Every subsequent register read 2262 * will read bogus data, and if 2263 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC 2264 * check will fail, too. So leave IRQ handler 2265 * directly. 2266 */ 2267 if (priv->can.state == CAN_STATE_BUS_OFF) 2268 return IRQ_HANDLED; 2269 } 2270 2271 handled = IRQ_HANDLED; 2272 } while (1); 2273 2274 out_fail: 2275 netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", 2276 err, priv->regs_status.intf); 2277 mcp251xfd_chip_interrupts_disable(priv); 2278 2279 return handled; 2280 } 2281 2282 static inline struct 2283 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) 2284 { 2285 u8 tx_head; 2286 2287 tx_head = mcp251xfd_get_tx_head(tx_ring); 2288 2289 return &tx_ring->obj[tx_head]; 2290 } 2291 2292 static void 2293 mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, 2294 struct mcp251xfd_tx_obj *tx_obj, 2295 const struct sk_buff *skb, 2296 unsigned int seq) 2297 { 2298 const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 2299 struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj; 2300 union mcp251xfd_tx_obj_load_buf *load_buf; 2301 u8 dlc; 2302 u32 id, flags; 2303 int offset, len; 2304 2305 if (cfd->can_id & CAN_EFF_FLAG) { 2306 u32 sid, eid; 2307 2308 sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id); 2309 eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id); 2310 2311 id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) | 2312 FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid); 2313 2314 flags = MCP251XFD_OBJ_FLAGS_IDE; 2315 } else { 2316 id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id); 2317 flags = 0; 2318 } 2319 2320 /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't 2321 * harm, only the lower 7 bits will be transferred into the 2322 * TEF object. 2323 */ 2324 dlc = can_fd_len2dlc(cfd->len); 2325 flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) | 2326 FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc); 2327 2328 if (cfd->can_id & CAN_RTR_FLAG) 2329 flags |= MCP251XFD_OBJ_FLAGS_RTR; 2330 2331 /* CANFD */ 2332 if (can_is_canfd_skb(skb)) { 2333 if (cfd->flags & CANFD_ESI) 2334 flags |= MCP251XFD_OBJ_FLAGS_ESI; 2335 2336 flags |= MCP251XFD_OBJ_FLAGS_FDF; 2337 2338 if (cfd->flags & CANFD_BRS) 2339 flags |= MCP251XFD_OBJ_FLAGS_BRS; 2340 } 2341 2342 load_buf = &tx_obj->buf; 2343 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) 2344 hw_tx_obj = &load_buf->crc.hw_tx_obj; 2345 else 2346 hw_tx_obj = &load_buf->nocrc.hw_tx_obj; 2347 2348 put_unaligned_le32(id, &hw_tx_obj->id); 2349 put_unaligned_le32(flags, &hw_tx_obj->flags); 2350 2351 /* Clear data at end of CAN frame */ 2352 offset = round_down(cfd->len, sizeof(u32)); 2353 len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset; 2354 if (MCP251XFD_SANITIZE_CAN && len) 2355 memset(hw_tx_obj->data + offset, 0x0, len); 2356 memcpy(hw_tx_obj->data, cfd->data, cfd->len); 2357 2358 /* Number of bytes to be written into the RAM of the controller */ 2359 len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags); 2360 if (MCP251XFD_SANITIZE_CAN) 2361 len += round_up(can_fd_dlc2len(dlc), sizeof(u32)); 2362 else 2363 len += round_up(cfd->len, sizeof(u32)); 2364 2365 if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) { 2366 u16 crc; 2367 2368 mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd, 2369 len); 2370 /* CRC */ 2371 len += sizeof(load_buf->crc.cmd); 2372 crc = mcp251xfd_crc16_compute(&load_buf->crc, len); 2373 put_unaligned_be16(crc, (void *)load_buf + len); 2374 2375 /* Total length */ 2376 len += sizeof(load_buf->crc.crc); 2377 } else { 2378 len += sizeof(load_buf->nocrc.cmd); 2379 } 2380 2381 tx_obj->xfer[0].len = len; 2382 } 2383 2384 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, 2385 struct mcp251xfd_tx_obj *tx_obj) 2386 { 2387 return spi_async(priv->spi, &tx_obj->msg); 2388 } 2389 2390 static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, 2391 struct mcp251xfd_tx_ring *tx_ring) 2392 { 2393 if (mcp251xfd_get_tx_free(tx_ring) > 0) 2394 return false; 2395 2396 netif_stop_queue(priv->ndev); 2397 2398 /* Memory barrier before checking tx_free (head and tail) */ 2399 smp_mb(); 2400 2401 if (mcp251xfd_get_tx_free(tx_ring) == 0) { 2402 netdev_dbg(priv->ndev, 2403 "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", 2404 tx_ring->head, tx_ring->tail, 2405 tx_ring->head - tx_ring->tail); 2406 2407 return true; 2408 } 2409 2410 netif_start_queue(priv->ndev); 2411 2412 return false; 2413 } 2414 2415 static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, 2416 struct net_device *ndev) 2417 { 2418 struct mcp251xfd_priv *priv = netdev_priv(ndev); 2419 struct mcp251xfd_tx_ring *tx_ring = priv->tx; 2420 struct mcp251xfd_tx_obj *tx_obj; 2421 u8 tx_head; 2422 int err; 2423 2424 if (can_dropped_invalid_skb(ndev, skb)) 2425 return NETDEV_TX_OK; 2426 2427 if (mcp251xfd_tx_busy(priv, tx_ring)) 2428 return NETDEV_TX_BUSY; 2429 2430 tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); 2431 mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head); 2432 2433 /* Stop queue if we occupy the complete TX FIFO */ 2434 tx_head = mcp251xfd_get_tx_head(tx_ring); 2435 tx_ring->head++; 2436 if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num) 2437 netif_stop_queue(ndev); 2438 2439 can_put_echo_skb(skb, ndev, tx_head); 2440 2441 err = mcp251xfd_tx_obj_write(priv, tx_obj); 2442 if (err) 2443 goto out_err; 2444 2445 return NETDEV_TX_OK; 2446 2447 out_err: 2448 netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); 2449 2450 return NETDEV_TX_OK; 2451 } 2452 2453 static int mcp251xfd_open(struct net_device *ndev) 2454 { 2455 struct mcp251xfd_priv *priv = netdev_priv(ndev); 2456 const struct spi_device *spi = priv->spi; 2457 int err; 2458 2459 err = pm_runtime_get_sync(ndev->dev.parent); 2460 if (err < 0) { 2461 pm_runtime_put_noidle(ndev->dev.parent); 2462 return err; 2463 } 2464 2465 err = open_candev(ndev); 2466 if (err) 2467 goto out_pm_runtime_put; 2468 2469 err = mcp251xfd_ring_alloc(priv); 2470 if (err) 2471 goto out_close_candev; 2472 2473 err = mcp251xfd_transceiver_enable(priv); 2474 if (err) 2475 goto out_mcp251xfd_ring_free; 2476 2477 err = mcp251xfd_chip_start(priv); 2478 if (err) 2479 goto out_transceiver_disable; 2480 2481 can_rx_offload_enable(&priv->offload); 2482 2483 err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, 2484 IRQF_ONESHOT, dev_name(&spi->dev), 2485 priv); 2486 if (err) 2487 goto out_can_rx_offload_disable; 2488 2489 err = mcp251xfd_chip_interrupts_enable(priv); 2490 if (err) 2491 goto out_free_irq; 2492 2493 netif_start_queue(ndev); 2494 2495 return 0; 2496 2497 out_free_irq: 2498 free_irq(spi->irq, priv); 2499 out_can_rx_offload_disable: 2500 can_rx_offload_disable(&priv->offload); 2501 out_transceiver_disable: 2502 mcp251xfd_transceiver_disable(priv); 2503 out_mcp251xfd_ring_free: 2504 mcp251xfd_ring_free(priv); 2505 out_close_candev: 2506 close_candev(ndev); 2507 out_pm_runtime_put: 2508 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 2509 pm_runtime_put(ndev->dev.parent); 2510 2511 return err; 2512 } 2513 2514 static int mcp251xfd_stop(struct net_device *ndev) 2515 { 2516 struct mcp251xfd_priv *priv = netdev_priv(ndev); 2517 2518 netif_stop_queue(ndev); 2519 mcp251xfd_chip_interrupts_disable(priv); 2520 free_irq(ndev->irq, priv); 2521 can_rx_offload_disable(&priv->offload); 2522 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 2523 mcp251xfd_transceiver_disable(priv); 2524 mcp251xfd_ring_free(priv); 2525 close_candev(ndev); 2526 2527 pm_runtime_put(ndev->dev.parent); 2528 2529 return 0; 2530 } 2531 2532 static const struct net_device_ops mcp251xfd_netdev_ops = { 2533 .ndo_open = mcp251xfd_open, 2534 .ndo_stop = mcp251xfd_stop, 2535 .ndo_start_xmit = mcp251xfd_start_xmit, 2536 .ndo_change_mtu = can_change_mtu, 2537 }; 2538 2539 static void 2540 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv) 2541 { 2542 const struct spi_device *spi = priv->spi; 2543 const struct spi_controller *ctlr = spi->controller; 2544 2545 if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) 2546 priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX; 2547 } 2548 2549 static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) 2550 { 2551 const struct net_device *ndev = priv->ndev; 2552 const struct mcp251xfd_devtype_data *devtype_data; 2553 u32 osc; 2554 int err; 2555 2556 /* The OSC_LPMEN is only supported on MCP2518FD, so use it to 2557 * autodetect the model. 2558 */ 2559 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC, 2560 MCP251XFD_REG_OSC_LPMEN, 2561 MCP251XFD_REG_OSC_LPMEN); 2562 if (err) 2563 return err; 2564 2565 err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); 2566 if (err) 2567 return err; 2568 2569 if (osc & MCP251XFD_REG_OSC_LPMEN) 2570 devtype_data = &mcp251xfd_devtype_data_mcp2518fd; 2571 else 2572 devtype_data = &mcp251xfd_devtype_data_mcp2517fd; 2573 2574 if (!mcp251xfd_is_251X(priv) && 2575 priv->devtype_data.model != devtype_data->model) { 2576 netdev_info(ndev, 2577 "Detected %s, but firmware specifies a %s. Fixing up.", 2578 __mcp251xfd_get_model_str(devtype_data->model), 2579 mcp251xfd_get_model_str(priv)); 2580 } 2581 priv->devtype_data = *devtype_data; 2582 2583 /* We need to preserve the Half Duplex Quirk. */ 2584 mcp251xfd_register_quirks(priv); 2585 2586 /* Re-init regmap with quirks of detected model. */ 2587 return mcp251xfd_regmap_init(priv); 2588 } 2589 2590 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) 2591 { 2592 int err, rx_pending; 2593 2594 if (!priv->rx_int) 2595 return 0; 2596 2597 err = mcp251xfd_chip_rx_int_enable(priv); 2598 if (err) 2599 return err; 2600 2601 /* Check if RX_INT is properly working. The RX_INT should not 2602 * be active after a softreset. 2603 */ 2604 rx_pending = gpiod_get_value_cansleep(priv->rx_int); 2605 2606 err = mcp251xfd_chip_rx_int_disable(priv); 2607 if (err) 2608 return err; 2609 2610 if (!rx_pending) 2611 return 0; 2612 2613 netdev_info(priv->ndev, 2614 "RX_INT active after softreset, disabling RX_INT support."); 2615 devm_gpiod_put(&priv->spi->dev, priv->rx_int); 2616 priv->rx_int = NULL; 2617 2618 return 0; 2619 } 2620 2621 static int 2622 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, 2623 u32 *dev_id, u32 *effective_speed_hz) 2624 { 2625 struct mcp251xfd_map_buf_nocrc *buf_rx; 2626 struct mcp251xfd_map_buf_nocrc *buf_tx; 2627 struct spi_transfer xfer[2] = { }; 2628 int err; 2629 2630 buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL); 2631 if (!buf_rx) 2632 return -ENOMEM; 2633 2634 buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL); 2635 if (!buf_tx) { 2636 err = -ENOMEM; 2637 goto out_kfree_buf_rx; 2638 } 2639 2640 xfer[0].tx_buf = buf_tx; 2641 xfer[0].len = sizeof(buf_tx->cmd); 2642 xfer[1].rx_buf = buf_rx->data; 2643 xfer[1].len = sizeof(dev_id); 2644 2645 mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); 2646 err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer)); 2647 if (err) 2648 goto out_kfree_buf_tx; 2649 2650 *dev_id = be32_to_cpup((__be32 *)buf_rx->data); 2651 *effective_speed_hz = xfer->effective_speed_hz; 2652 2653 out_kfree_buf_tx: 2654 kfree(buf_tx); 2655 out_kfree_buf_rx: 2656 kfree(buf_rx); 2657 2658 return 0; 2659 } 2660 2661 #define MCP251XFD_QUIRK_ACTIVE(quirk) \ 2662 (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-') 2663 2664 static int 2665 mcp251xfd_register_done(const struct mcp251xfd_priv *priv) 2666 { 2667 u32 dev_id, effective_speed_hz; 2668 int err; 2669 2670 err = mcp251xfd_register_get_dev_id(priv, &dev_id, 2671 &effective_speed_hz); 2672 if (err) 2673 return err; 2674 2675 netdev_info(priv->ndev, 2676 "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n", 2677 mcp251xfd_get_model_str(priv), 2678 FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id), 2679 FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id), 2680 priv->rx_int ? '+' : '-', 2681 MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN), 2682 MCP251XFD_QUIRK_ACTIVE(CRC_REG), 2683 MCP251XFD_QUIRK_ACTIVE(CRC_RX), 2684 MCP251XFD_QUIRK_ACTIVE(CRC_TX), 2685 MCP251XFD_QUIRK_ACTIVE(ECC), 2686 MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX), 2687 priv->can.clock.freq / 1000000, 2688 priv->can.clock.freq % 1000000 / 1000 / 10, 2689 priv->spi_max_speed_hz_orig / 1000000, 2690 priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10, 2691 priv->spi->max_speed_hz / 1000000, 2692 priv->spi->max_speed_hz % 1000000 / 1000 / 10, 2693 effective_speed_hz / 1000000, 2694 effective_speed_hz % 1000000 / 1000 / 10); 2695 2696 return 0; 2697 } 2698 2699 static int mcp251xfd_register(struct mcp251xfd_priv *priv) 2700 { 2701 struct net_device *ndev = priv->ndev; 2702 int err; 2703 2704 err = mcp251xfd_clks_and_vdd_enable(priv); 2705 if (err) 2706 return err; 2707 2708 pm_runtime_get_noresume(ndev->dev.parent); 2709 err = pm_runtime_set_active(ndev->dev.parent); 2710 if (err) 2711 goto out_runtime_put_noidle; 2712 pm_runtime_enable(ndev->dev.parent); 2713 2714 mcp251xfd_register_quirks(priv); 2715 2716 err = mcp251xfd_chip_softreset(priv); 2717 if (err == -ENODEV) 2718 goto out_runtime_disable; 2719 if (err) 2720 goto out_chip_set_mode_sleep; 2721 2722 err = mcp251xfd_register_chip_detect(priv); 2723 if (err) 2724 goto out_chip_set_mode_sleep; 2725 2726 err = mcp251xfd_register_check_rx_int(priv); 2727 if (err) 2728 goto out_chip_set_mode_sleep; 2729 2730 err = register_candev(ndev); 2731 if (err) 2732 goto out_chip_set_mode_sleep; 2733 2734 err = mcp251xfd_register_done(priv); 2735 if (err) 2736 goto out_unregister_candev; 2737 2738 /* Put controller into sleep mode and let pm_runtime_put() 2739 * disable the clocks and vdd. If CONFIG_PM is not enabled, 2740 * the clocks and vdd will stay powered. 2741 */ 2742 err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 2743 if (err) 2744 goto out_unregister_candev; 2745 2746 pm_runtime_put(ndev->dev.parent); 2747 2748 return 0; 2749 2750 out_unregister_candev: 2751 unregister_candev(ndev); 2752 out_chip_set_mode_sleep: 2753 mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); 2754 out_runtime_disable: 2755 pm_runtime_disable(ndev->dev.parent); 2756 out_runtime_put_noidle: 2757 pm_runtime_put_noidle(ndev->dev.parent); 2758 mcp251xfd_clks_and_vdd_disable(priv); 2759 2760 return err; 2761 } 2762 2763 static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) 2764 { 2765 struct net_device *ndev = priv->ndev; 2766 2767 unregister_candev(ndev); 2768 2769 pm_runtime_get_sync(ndev->dev.parent); 2770 pm_runtime_put_noidle(ndev->dev.parent); 2771 mcp251xfd_clks_and_vdd_disable(priv); 2772 pm_runtime_disable(ndev->dev.parent); 2773 } 2774 2775 static const struct of_device_id mcp251xfd_of_match[] = { 2776 { 2777 .compatible = "microchip,mcp2517fd", 2778 .data = &mcp251xfd_devtype_data_mcp2517fd, 2779 }, { 2780 .compatible = "microchip,mcp2518fd", 2781 .data = &mcp251xfd_devtype_data_mcp2518fd, 2782 }, { 2783 .compatible = "microchip,mcp251xfd", 2784 .data = &mcp251xfd_devtype_data_mcp251xfd, 2785 }, { 2786 /* sentinel */ 2787 }, 2788 }; 2789 MODULE_DEVICE_TABLE(of, mcp251xfd_of_match); 2790 2791 static const struct spi_device_id mcp251xfd_id_table[] = { 2792 { 2793 .name = "mcp2517fd", 2794 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd, 2795 }, { 2796 .name = "mcp2518fd", 2797 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd, 2798 }, { 2799 .name = "mcp251xfd", 2800 .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd, 2801 }, { 2802 /* sentinel */ 2803 }, 2804 }; 2805 MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table); 2806 2807 static int mcp251xfd_probe(struct spi_device *spi) 2808 { 2809 const void *match; 2810 struct net_device *ndev; 2811 struct mcp251xfd_priv *priv; 2812 struct gpio_desc *rx_int; 2813 struct regulator *reg_vdd, *reg_xceiver; 2814 struct clk *clk; 2815 u32 freq; 2816 int err; 2817 2818 if (!spi->irq) 2819 return dev_err_probe(&spi->dev, -ENXIO, 2820 "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n"); 2821 2822 rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int", 2823 GPIOD_IN); 2824 if (PTR_ERR(rx_int) == -EPROBE_DEFER) 2825 return -EPROBE_DEFER; 2826 else if (IS_ERR(rx_int)) 2827 return PTR_ERR(rx_int); 2828 2829 reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd"); 2830 if (PTR_ERR(reg_vdd) == -EPROBE_DEFER) 2831 return -EPROBE_DEFER; 2832 else if (PTR_ERR(reg_vdd) == -ENODEV) 2833 reg_vdd = NULL; 2834 else if (IS_ERR(reg_vdd)) 2835 return PTR_ERR(reg_vdd); 2836 2837 reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); 2838 if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) 2839 return -EPROBE_DEFER; 2840 else if (PTR_ERR(reg_xceiver) == -ENODEV) 2841 reg_xceiver = NULL; 2842 else if (IS_ERR(reg_xceiver)) 2843 return PTR_ERR(reg_xceiver); 2844 2845 clk = devm_clk_get(&spi->dev, NULL); 2846 if (IS_ERR(clk)) { 2847 dev_err(&spi->dev, "No Oscillator (clock) defined.\n"); 2848 return PTR_ERR(clk); 2849 } 2850 freq = clk_get_rate(clk); 2851 2852 /* Sanity check */ 2853 if (freq < MCP251XFD_SYSCLOCK_HZ_MIN || 2854 freq > MCP251XFD_SYSCLOCK_HZ_MAX) { 2855 dev_err(&spi->dev, 2856 "Oscillator frequency (%u Hz) is too low or high.\n", 2857 freq); 2858 return -ERANGE; 2859 } 2860 2861 if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) { 2862 dev_err(&spi->dev, 2863 "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n", 2864 freq); 2865 return -ERANGE; 2866 } 2867 2868 ndev = alloc_candev(sizeof(struct mcp251xfd_priv), 2869 MCP251XFD_TX_OBJ_NUM_MAX); 2870 if (!ndev) 2871 return -ENOMEM; 2872 2873 SET_NETDEV_DEV(ndev, &spi->dev); 2874 2875 ndev->netdev_ops = &mcp251xfd_netdev_ops; 2876 ndev->irq = spi->irq; 2877 ndev->flags |= IFF_ECHO; 2878 2879 priv = netdev_priv(ndev); 2880 spi_set_drvdata(spi, priv); 2881 priv->can.clock.freq = freq; 2882 priv->can.do_set_mode = mcp251xfd_set_mode; 2883 priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; 2884 priv->can.bittiming_const = &mcp251xfd_bittiming_const; 2885 priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; 2886 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 2887 CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | 2888 CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; 2889 priv->ndev = ndev; 2890 priv->spi = spi; 2891 priv->rx_int = rx_int; 2892 priv->clk = clk; 2893 priv->reg_vdd = reg_vdd; 2894 priv->reg_xceiver = reg_xceiver; 2895 2896 match = device_get_match_data(&spi->dev); 2897 if (match) 2898 priv->devtype_data = *(struct mcp251xfd_devtype_data *)match; 2899 else 2900 priv->devtype_data = *(struct mcp251xfd_devtype_data *) 2901 spi_get_device_id(spi)->driver_data; 2902 2903 /* Errata Reference: 2904 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4. 2905 * 2906 * The SPI can write corrupted data to the RAM at fast SPI 2907 * speeds: 2908 * 2909 * Simultaneous activity on the CAN bus while writing data to 2910 * RAM via the SPI interface, with high SCK frequency, can 2911 * lead to corrupted data being written to RAM. 2912 * 2913 * Fix/Work Around: 2914 * Ensure that FSCK is less than or equal to 0.85 * 2915 * (FSYSCLK/2). 2916 * 2917 * Known good and bad combinations are: 2918 * 2919 * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk Status config 2920 * 2921 * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx> 2922 * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 9375000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx> 2923 * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx> 2924 * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 18750000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx> 2925 * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz good assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT> 2926 * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 9523809 Hz 95.34% 28571429 Hz bad assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT> 2927 * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default 2928 * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default 2929 * 2930 */ 2931 priv->spi_max_speed_hz_orig = spi->max_speed_hz; 2932 spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850); 2933 spi->bits_per_word = 8; 2934 spi->rt = true; 2935 err = spi_setup(spi); 2936 if (err) 2937 goto out_free_candev; 2938 2939 err = mcp251xfd_regmap_init(priv); 2940 if (err) 2941 goto out_free_candev; 2942 2943 err = can_rx_offload_add_manual(ndev, &priv->offload, 2944 MCP251XFD_NAPI_WEIGHT); 2945 if (err) 2946 goto out_free_candev; 2947 2948 err = mcp251xfd_register(priv); 2949 if (err) 2950 goto out_free_candev; 2951 2952 return 0; 2953 2954 out_free_candev: 2955 spi->max_speed_hz = priv->spi_max_speed_hz_orig; 2956 2957 free_candev(ndev); 2958 2959 return err; 2960 } 2961 2962 static int mcp251xfd_remove(struct spi_device *spi) 2963 { 2964 struct mcp251xfd_priv *priv = spi_get_drvdata(spi); 2965 struct net_device *ndev = priv->ndev; 2966 2967 can_rx_offload_del(&priv->offload); 2968 mcp251xfd_unregister(priv); 2969 spi->max_speed_hz = priv->spi_max_speed_hz_orig; 2970 free_candev(ndev); 2971 2972 return 0; 2973 } 2974 2975 static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) 2976 { 2977 const struct mcp251xfd_priv *priv = dev_get_drvdata(device); 2978 2979 return mcp251xfd_clks_and_vdd_disable(priv); 2980 } 2981 2982 static int __maybe_unused mcp251xfd_runtime_resume(struct device *device) 2983 { 2984 const struct mcp251xfd_priv *priv = dev_get_drvdata(device); 2985 2986 return mcp251xfd_clks_and_vdd_enable(priv); 2987 } 2988 2989 static const struct dev_pm_ops mcp251xfd_pm_ops = { 2990 SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend, 2991 mcp251xfd_runtime_resume, NULL) 2992 }; 2993 2994 static struct spi_driver mcp251xfd_driver = { 2995 .driver = { 2996 .name = DEVICE_NAME, 2997 .pm = &mcp251xfd_pm_ops, 2998 .of_match_table = mcp251xfd_of_match, 2999 }, 3000 .probe = mcp251xfd_probe, 3001 .remove = mcp251xfd_remove, 3002 .id_table = mcp251xfd_id_table, 3003 }; 3004 module_spi_driver(mcp251xfd_driver); 3005 3006 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); 3007 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver"); 3008 MODULE_LICENSE("GPL v2"); 3009