xref: /openbmc/linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c (revision 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020, 2021 Pengutronix,
6 //               Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/pm_runtime.h>
22 
23 #include <asm/unaligned.h>
24 
25 #include "mcp251xfd.h"
26 
27 #define DEVICE_NAME "mcp251xfd"
28 
/* MCP2517FD: full CRC (register, RX, TX) and ECC quirks enabled, plus
 * MCP251XFD_QUIRK_MAB_NO_WARN (quirk semantics defined in mcp251xfd.h).
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
35 
/* MCP2518FD: like the MCP2517FD but without the MAB_NO_WARN quirk. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
41 
/* Autodetect model, start with CRC enabled.
 * Used until probing narrows the device down to a concrete model.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
48 
/* Nominal (arbitration phase) bit-timing limits advertised to the CAN core. */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
60 
/* CAN-FD data phase bit-timing limits advertised to the CAN core. */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
72 
73 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
74 {
75 	switch (model) {
76 	case MCP251XFD_MODEL_MCP2517FD:
77 		return "MCP2517FD";
78 	case MCP251XFD_MODEL_MCP2518FD:
79 		return "MCP2518FD";
80 	case MCP251XFD_MODEL_MCP251XFD:
81 		return "MCP251xFD";
82 	}
83 
84 	return "<unknown>";
85 }
86 
/* Convenience wrapper: model name for the model stored in priv. */
static inline const char *
mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_get_model_str(priv->devtype_data.model);
}
92 
93 static const char *mcp251xfd_get_mode_str(const u8 mode)
94 {
95 	switch (mode) {
96 	case MCP251XFD_REG_CON_MODE_MIXED:
97 		return "Mixed (CAN FD/CAN 2.0)";
98 	case MCP251XFD_REG_CON_MODE_SLEEP:
99 		return "Sleep";
100 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
101 		return "Internal Loopback";
102 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
103 		return "Listen Only";
104 	case MCP251XFD_REG_CON_MODE_CONFIG:
105 		return "Configuration";
106 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
107 		return "External Loopback";
108 	case MCP251XFD_REG_CON_MODE_CAN2_0:
109 		return "CAN 2.0";
110 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
111 		return "Restricted Operation";
112 	}
113 
114 	return "<unknown>";
115 }
116 
/* Enable the optional Vdd regulator; no-op (success) if none is wired up. */
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
	if (!priv->reg_vdd)
		return 0;

	return regulator_enable(priv->reg_vdd);
}
124 
/* Disable the optional Vdd regulator; no-op (success) if none is wired up. */
static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
{
	if (!priv->reg_vdd)
		return 0;

	return regulator_disable(priv->reg_vdd);
}
132 
/* Enable the optional transceiver regulator; no-op (success) if absent. */
static inline int
mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
{
	if (!priv->reg_xceiver)
		return 0;

	return regulator_enable(priv->reg_xceiver);
}
141 
/* Disable the optional transceiver regulator; no-op (success) if absent. */
static inline int
mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
{
	if (!priv->reg_xceiver)
		return 0;

	return regulator_disable(priv->reg_xceiver);
}
150 
151 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
152 {
153 	int err;
154 
155 	err = clk_prepare_enable(priv->clk);
156 	if (err)
157 		return err;
158 
159 	err = mcp251xfd_vdd_enable(priv);
160 	if (err)
161 		clk_disable_unprepare(priv->clk);
162 
163 	/* Wait for oscillator stabilisation time after power up */
164 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
165 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
166 
167 	return err;
168 }
169 
170 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
171 {
172 	int err;
173 
174 	err = mcp251xfd_vdd_disable(priv);
175 	if (err)
176 		return err;
177 
178 	clk_disable_unprepare(priv->clk);
179 
180 	return 0;
181 }
182 
/* Prepare a SPI register-write buffer covering only the bytes selected
 * by @mask of the 32 bit register @reg.
 *
 * The write command targets @reg plus the offset of the first set byte
 * in @mask; @val is shifted accordingly and copied little endian into
 * the payload. If the chip requires CRC-protected register access
 * (MCP251XFD_QUIRK_CRC_REG), the CRC over command + payload is
 * appended big endian.
 *
 * Returns the total number of bytes to transfer over SPI.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Payload length = contiguous byte span covered by the mask. */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
218 
/* Read the TEF User Address register and convert it into the driver's
 * TEF tail index (byte address / TEF object size).
 *
 * Returns 0 on success or a negative errno from the register read.
 */
static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				 u8 *tef_tail)
{
	u32 tef_ua;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
	if (err)
		return err;

	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);

	return 0;
}
234 
/* Read the TX FIFO status register and extract the FIFO index (FIFOCI)
 * as the driver's TX tail.
 *
 * Returns 0 on success or a negative errno from the register read.
 */
static inline int
mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				u8 *tx_tail)
{
	u32 fifo_sta;
	int err;

	err = regmap_read(priv->map_reg,
			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
			  &fifo_sta);
	if (err)
		return err;

	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);

	return 0;
}
252 
/* Read the status register of @ring's RX FIFO and extract the FIFO
 * index (FIFOCI) as the driver's RX head.
 *
 * Returns 0 on success or a negative errno from the register read.
 */
static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring,
				u8 *rx_head)
{
	u32 fifo_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
			  &fifo_sta);
	if (err)
		return err;

	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);

	return 0;
}
270 
/* Read the User Address of @ring's RX FIFO and convert it into the
 * driver's RX tail index: subtract the ring's base offset within the
 * chip RAM, then divide by the object size.
 *
 * Returns 0 on success or a negative errno from the register read.
 */
static inline int
mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring,
				u8 *rx_tail)
{
	u32 fifo_ua;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
			  &fifo_ua);
	if (err)
		return err;

	fifo_ua -= ring->base - MCP251XFD_RAM_START;
	*rx_tail = fifo_ua / ring->obj_size;

	return 0;
}
289 
/* Initialize TX object @n of @ring: prepare its SPI write command
 * (CRC-protected if MCP251XFD_QUIRK_CRC_TX is set) and set up the
 * two-transfer SPI message: [0] FIFO payload load, [1] FIFO
 * request-to-send, using the ring's shared rts_buf of @rts_buf_len
 * bytes.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
325 
/* (Re-)initialize the TEF, TX and RX rings: reset head/tail pointers,
 * lay the rings out back-to-back in the controller RAM (TEF objects,
 * then TX objects, then the RX rings), and pre-build the UINC
 * ("increment tail pointer") SPI transfers for TEF and each RX FIFO.
 * Must be called after mcp251xfd_ring_alloc() has sized the rings.
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as
	 * data. Set "cs_change" of the last transfer to "0" to
	 * properly deactivate the chip select at the end of the
	 * message.
	 */
	xfer->cs_change = 0;

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects start right after the TEF objects in chip RAM. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* First RX ring follows the TX objects; each further
		 * ring follows its predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Set "cs_change"
		 * of the last transfer to "0" to properly deactivate
		 * the chip select at the end of the message.
		 */
		xfer->cs_change = 0;
	}
}
425 
426 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
427 {
428 	int i;
429 
430 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
431 		kfree(priv->rx[i]);
432 		priv->rx[i] = NULL;
433 	}
434 }
435 
/* Size and allocate the rings within the controller's RAM budget.
 *
 * Object sizes depend on the mode: CAN-FD sized objects are used for
 * FD and — as noted below — also for listen-only mode. The RAM left
 * after TEF + TX objects is handed out to RX rings; each ring gets the
 * largest power-of-two object count that still fits (capped at
 * MCP251XFD_RX_OBJ_NUM_MAX).
 *
 * Returns 0 on success, -ENOMEM on allocation failure (any rings
 * already allocated are freed again).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* RAM remaining after one TEF object per TX object plus the
	 * TX objects themselves.
	 */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Largest power of two <= what fits, capped. */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
504 
/* Read the current operation mode (OPMOD field of the CON register).
 *
 * Returns 0 on success or a negative errno from the register read.
 */
static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
{
	u32 val;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
	if (err)
		return err;

	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);

	return 0;
}
519 
/* Request operation mode @mode_req via the CON register's REQOP field
 * and, unless @nowait is set or Sleep Mode was requested, poll OPMOD
 * until the controller has actually switched.
 *
 * Returns 0 on success, or a negative errno (including the poll
 * timeout, which is also logged).
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	/* Don't poll when entering Sleep Mode — the chip won't answer. */
	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
553 
/* Set mode and wait for the controller to reach it. */
static inline int
mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
560 
/* Request mode without waiting for the controller to reach it. */
static inline int
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
			       const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
567 
568 static inline bool mcp251xfd_osc_invalid(u32 reg)
569 {
570 	return reg == 0x0 || reg == 0xffffffff;
571 }
572 
/* Power up the on-chip oscillator: write Power-On defaults for the
 * Clock Output Divisor (which also clears "Oscillator Disable"), then
 * poll for the "Oscillator Ready" bit.
 *
 * Returns 0 on success, -ENODEV if the OSC register reads back as
 * all-zeros/all-ones (no chip), -ETIMEDOUT if the oscillator never
 * becomes ready, or another negative errno from regmap.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	/* Check the raw register value before the poll result: an
	 * invalid read pattern means "no chip", which is more specific
	 * than a plain timeout.
	 */
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	}

	return err;
}
615 
/* Issue a SPI soft reset: wake the oscillator, enter Config Mode,
 * then send the reset command.
 *
 * Returns 0 on success or a negative errno.
 */
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
{
	const __be16 cmd = mcp251xfd_cmd_reset();
	int err;

	/* The Set Mode and SPI Reset command only seems to works if
	 * the controller is not in Sleep Mode.
	 */
	err = mcp251xfd_chip_clock_enable(priv);
	if (err)
		return err;

	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
	if (err)
		return err;

	/* spi_write_then_read() works with non DMA-safe buffers */
	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
}
635 
/* Verify that the soft reset took effect: the controller must be in
 * Config Mode and the OSC register must hold its reset defaults.
 *
 * Returns 0 on success, -ETIMEDOUT if either check fails (so the
 * caller retries the reset), or a negative errno from regmap.
 */
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference;
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
		netdev_info(priv->ndev,
			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
			    mcp251xfd_get_mode_str(mode), mode);
		return -ETIMEDOUT;
	}

	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);

	/* check reset defaults of OSC reg */
	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc != osc_reference) {
		netdev_info(priv->ndev,
			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
			    osc, osc_reference);
		return -ETIMEDOUT;
	}

	return 0;
}
671 
/* Soft-reset the chip, retrying up to MCP251XFD_SOFTRESET_RETRIES_MAX
 * times on -ETIMEDOUT from either the reset or its verification.
 * Other errors abort immediately.
 *
 * Returns 0 on success or the last negative errno.
 */
static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
{
	int err, i;

	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
		if (i)
			netdev_info(priv->ndev,
				    "Retrying to reset controller.\n");

		err = mcp251xfd_chip_softreset_do(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		err = mcp251xfd_chip_softreset_check(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		return 0;
	}

	/* All retries timed out; report the last error. */
	return err;
}
698 
/* Configure the clock-related registers after reset: request Low
 * Power Mode on oscillator disable and enable the Time Base Counter
 * with prescaler 1.
 *
 * Returns 0 on success or a negative errno from regmap.
 */
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
	u32 osc;
	int err;

	/* Activate Low Power Mode on Oscillator Disable. This only
	 * works on the MCP2518FD. The MCP2517FD will go into normal
	 * Sleep Mode instead.
	 */
	osc = MCP251XFD_REG_OSC_LPMEN |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Set Time Base Counter Prescaler to 1.
	 *
	 * This means an overflow of the 32 bit Time Base Counter
	 * register at 40 MHz every 107 seconds.
	 */
	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
			    MCP251XFD_REG_TSCON_TBCEN);
}
723 
/* Program the CAN controller configuration (CON), nominal bit timing
 * (NBTCFG) and — for CAN-FD — data bit timing (DBTCFG) and
 * Transmitter Delay Compensation (TDC) from the values the CAN core
 * computed into priv->can.
 *
 * Returns 0 on success or a negative errno from regmap.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	/* Data-phase registers only apply in CAN-FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation
	 *
	 * NOTE(review): the clamp range [-64, 63] looks wider than a
	 * typical TDCO hardware field — verify against the datasheet
	 * that FIELD_PREP cannot be fed an out-of-range value here.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
801 
/* Configure the chip GPIOs for the dedicated RX interrupt line, if
 * one is in use (priv->rx_int). Returns 0 if no RX-INT is configured,
 * else the regmap_write() result.
 */
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input/RX Interrupt
	 *
	 * PIN1 must be Input, otherwise there is a glitch on the
	 * rx-INT line. It happens between setting the PIN as output
	 * (in the first byte of the SPI transfer) and configuring the
	 * PIN as interrupt (in the last byte of the SPI transfer).
	 */
	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
		MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
822 
/* Revert both GPIO pins to plain inputs, disabling the RX interrupt
 * function. Returns 0 if no RX-INT is configured, else the
 * regmap_write() result.
 */
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input
	 */
	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
838 
/* Program the FIFOCON register for one RX FIFO: FIFO depth, RX
 * timestamping, overflow and not-empty interrupts, and a payload size
 * matching the mode (64 bytes for FD/listen-only, 8 otherwise).
 *
 * Returns the regmap_write() result.
 */
static int
mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring)
{
	u32 fifo_con;

	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
	 *
	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
	 * generate a RXOVIF, use this to properly detect RX MAB
	 * overflows.
	 */
	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			      ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_RXTSEN |
		MCP251XFD_REG_FIFOCON_RXOVIE |
		MCP251XFD_REG_FIFOCON_TFNRFNIE;

	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_8);

	return regmap_write(priv->map_reg,
			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
}
867 
/* Enable the acceptance filter for @ring and point it at the ring's
 * FIFO. Four filters share one FLTCON register, hence the ring->nr >> 2
 * register index and the per-filter field mask.
 *
 * Returns the regmap_update_bits() result.
 */
static int
mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
				  const struct mcp251xfd_rx_ring *ring)
{
	u32 fltcon;

	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);

	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
				  fltcon);
}
882 
/* Program the hardware FIFO configuration: the TEF, the TX FIFO
 * (payload size per mode, retransmission attempts per one-shot
 * setting), and every RX FIFO with its acceptance filter.
 *
 * Returns 0 on success or a negative errno from regmap.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* Payload size: 64 bytes for FD/listen-only, 8 for classic. */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* One-shot mode: a single transmission attempt per frame. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
940 
/* Initialize ECC: enable it if the chip supports it (QUIRK_ECC), then
 * zero the whole controller RAM so every word carries valid ECC bits
 * before first use.
 *
 * Returns 0 on success, -ENOMEM if the temporary zero buffer cannot
 * be allocated, or a negative errno from regmap.
 */
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	void *ram;
	u32 val = 0;
	int err;

	/* Reset cached ECC error state. */
	ecc->ecc_stat = 0;

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
		val = MCP251XFD_REG_ECCCON_ECCEN;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 MCP251XFD_REG_ECCCON_ECCEN, val);
	if (err)
		return err;

	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
	if (!ram)
		return -ENOMEM;

	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
			       MCP251XFD_RAM_SIZE);
	kfree(ram);

	return err;
}
968 
/* Clear the cached ECC error state after a successful TEF event. */
static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;

	ecc->ecc_stat = 0;
}
975 
976 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
977 {
978 	u8 mode;
979 
980 
981 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
982 		mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
983 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
984 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
985 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
986 		mode = MCP251XFD_REG_CON_MODE_MIXED;
987 	else
988 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
989 
990 	return mode;
991 }
992 
/* Switch the chip into the mode derived from the CAN ctrlmode,
 * optionally without waiting for the transition to complete.
 */
static int
__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
				 bool nowait)
{
	u8 mode;

	mode = mcp251xfd_get_normal_mode(priv);

	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
}
1003 
/* Enter normal mode and wait for the controller to reach it. */
static inline int
mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, false);
}
1009 
/* Request normal mode without waiting for the transition. */
static inline int
mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, true);
}
1015 
/* Enable the interrupt sources the driver handles: CRC errors, ECC
 * single/double errors, and the main INT register sources (bus/system
 * errors, RX overflow, TX attempt, SPI CRC, ECC, TEF, mode change,
 * RX). IVMIE is only enabled with bus-error reporting turned on.
 *
 * Returns 0 on success or a negative errno from regmap.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
1046 
/* Disable all interrupt sources enabled by
 * mcp251xfd_chip_interrupts_enable(): INT register, ECC interrupt
 * enables, and the CRC interrupt enables.
 *
 * Returns 0 on success or a negative errno from regmap.
 */
static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
{
	int err;
	u32 mask;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
	if (err)
		return err;

	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 mask, 0x0);
	if (err)
		return err;

	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
1064 
/* Stop the chip: record @state in the CAN core state, mask all
 * interrupts, disable the RX interrupt pin and put the controller to
 * sleep. Interrupt/RX-INT errors are intentionally ignored — this is
 * a best-effort shutdown path.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1074 
1075 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1076 {
1077 	int err;
1078 
1079 	err = mcp251xfd_chip_softreset(priv);
1080 	if (err)
1081 		goto out_chip_stop;
1082 
1083 	err = mcp251xfd_chip_clock_init(priv);
1084 	if (err)
1085 		goto out_chip_stop;
1086 
1087 	err = mcp251xfd_set_bittiming(priv);
1088 	if (err)
1089 		goto out_chip_stop;
1090 
1091 	err = mcp251xfd_chip_rx_int_enable(priv);
1092 	if (err)
1093 		return err;
1094 
1095 	err = mcp251xfd_chip_ecc_init(priv);
1096 	if (err)
1097 		goto out_chip_stop;
1098 
1099 	mcp251xfd_ring_init(priv);
1100 
1101 	err = mcp251xfd_chip_fifo_init(priv);
1102 	if (err)
1103 		goto out_chip_stop;
1104 
1105 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1106 
1107 	err = mcp251xfd_chip_set_normal_mode(priv);
1108 	if (err)
1109 		goto out_chip_stop;
1110 
1111 	return 0;
1112 
1113  out_chip_stop:
1114 	mcp251xfd_dump(priv);
1115 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1116 
1117 	return err;
1118 }
1119 
1120 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1121 {
1122 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
1123 	int err;
1124 
1125 	switch (mode) {
1126 	case CAN_MODE_START:
1127 		err = mcp251xfd_chip_start(priv);
1128 		if (err)
1129 			return err;
1130 
1131 		err = mcp251xfd_chip_interrupts_enable(priv);
1132 		if (err) {
1133 			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1134 			return err;
1135 		}
1136 
1137 		netif_wake_queue(ndev);
1138 		break;
1139 
1140 	default:
1141 		return -EOPNOTSUPP;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1148 					struct can_berr_counter *bec)
1149 {
1150 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1151 	u32 trec;
1152 	int err;
1153 
1154 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1155 	if (err)
1156 		return err;
1157 
1158 	if (trec & MCP251XFD_REG_TREC_TXBO)
1159 		bec->txerr = 256;
1160 	else
1161 		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1162 	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1163 
1164 	return 0;
1165 }
1166 
1167 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1168 				      struct can_berr_counter *bec)
1169 {
1170 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1171 
1172 	/* Avoid waking up the controller if the interface is down */
1173 	if (!(ndev->flags & IFF_UP))
1174 		return 0;
1175 
1176 	/* The controller is powered down during Bus Off, use saved
1177 	 * bec values.
1178 	 */
1179 	if (priv->can.state == CAN_STATE_BUS_OFF) {
1180 		*bec = priv->bec;
1181 		return 0;
1182 	}
1183 
1184 	return __mcp251xfd_get_berr_counter(ndev, bec);
1185 }
1186 
1187 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1188 {
1189 	u8 tef_tail_chip, tef_tail;
1190 	int err;
1191 
1192 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1193 		return 0;
1194 
1195 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1196 	if (err)
1197 		return err;
1198 
1199 	tef_tail = mcp251xfd_get_tef_tail(priv);
1200 	if (tef_tail_chip != tef_tail) {
1201 		netdev_err(priv->ndev,
1202 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1203 			   tef_tail_chip, tef_tail);
1204 		return -EILSEQ;
1205 	}
1206 
1207 	return 0;
1208 }
1209 
1210 static int
1211 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1212 			const struct mcp251xfd_rx_ring *ring)
1213 {
1214 	u8 rx_tail_chip, rx_tail;
1215 	int err;
1216 
1217 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1218 		return 0;
1219 
1220 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1221 	if (err)
1222 		return err;
1223 
1224 	rx_tail = mcp251xfd_get_rx_tail(ring);
1225 	if (rx_tail_chip != rx_tail) {
1226 		netdev_err(priv->ndev,
1227 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1228 			   rx_tail_chip, rx_tail);
1229 		return -EILSEQ;
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 static int
1236 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1237 {
1238 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1239 	u32 tef_sta;
1240 	int err;
1241 
1242 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1243 	if (err)
1244 		return err;
1245 
1246 	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1247 		netdev_err(priv->ndev,
1248 			   "Transmit Event FIFO buffer overflow.\n");
1249 		return -ENOBUFS;
1250 	}
1251 
1252 	netdev_info(priv->ndev,
1253 		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
1254 		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1255 		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1256 		    "not empty" : "empty",
1257 		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);
1258 
1259 	/* The Sequence Number in the TEF doesn't match our tef_tail. */
1260 	return -EAGAIN;
1261 }
1262 
/* Handle a single TEF object: validate its sequence number against
 * our tef_tail, then complete the corresponding echo skb and update
 * the TX statistics.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	u32 seq, seq_masked, tef_tail_masked, tef_tail;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	/* Propagate the hardware timestamp to the echo skb before it
	 * is handed back to the stack.
	 */
	tef_tail = mcp251xfd_get_tef_tail(priv);
	skb = priv->can.echo_skb[tef_tail];
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    tef_tail,
					    hw_tef_obj->ts, NULL);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}
1298 
/* Update the TEF head from the chip's TX FIFO tail pointer.
 *
 * The ring indices in priv are free-running counters, while the chip
 * only reports an index modulo the number of TX objects. The
 * round_down()/wrap-around arithmetic below reconstructs a
 * monotonically increasing head from the chip's modulo value.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1322 
1323 static inline int
1324 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1325 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1326 		       const u8 offset, const u8 len)
1327 {
1328 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1329 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1330 
1331 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1332 	    (offset > tx_ring->obj_num ||
1333 	     len > tx_ring->obj_num ||
1334 	     offset + len > tx_ring->obj_num)) {
1335 		netdev_err(priv->ndev,
1336 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1337 			   tx_ring->obj_num, offset, len);
1338 		return -ERANGE;
1339 	}
1340 
1341 	return regmap_bulk_read(priv->map_rx,
1342 				mcp251xfd_get_tef_obj_addr(offset),
1343 				hw_tef_obj,
1344 				sizeof(*hw_tef_obj) / val_bytes * len);
1345 }
1346 
/* TEF interrupt handler: read all pending TEF objects (in up to two
 * chunks, as the ring may wrap), complete the corresponding echo
 * skbs, increment the chip's TEF FIFO tail pointer and wake the TX
 * queue if space became available.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	/* len = total pending objects, l = objects readable before the
	 * ring wraps; a second read handles the wrapped remainder.
	 */
	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		int offset;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		tx_ring->tail += len;

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1424 
/* Update the RX ring head from the chip's RX FIFO head pointer.
 *
 * As in mcp251xfd_tef_ring_update(), ring->head is a free-running
 * counter while the chip reports an index modulo ring->obj_num; the
 * arithmetic below reconstructs the monotonically increasing head.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1448 
1449 static void
1450 mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
1451 			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1452 			   struct sk_buff *skb)
1453 {
1454 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1455 	u8 dlc;
1456 
1457 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
1458 		u32 sid, eid;
1459 
1460 		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1461 		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1462 
1463 		cfd->can_id = CAN_EFF_FLAG |
1464 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1465 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1466 	} else {
1467 		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1468 					hw_rx_obj->id);
1469 	}
1470 
1471 	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
1472 
1473 	/* CANFD */
1474 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1475 
1476 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1477 			cfd->flags |= CANFD_ESI;
1478 
1479 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1480 			cfd->flags |= CANFD_BRS;
1481 
1482 		cfd->len = can_fd_dlc2len(dlc);
1483 	} else {
1484 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1485 			cfd->can_id |= CAN_RTR_FLAG;
1486 
1487 		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
1488 				     priv->can.ctrlmode);
1489 	}
1490 
1491 	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
1492 		memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1493 
1494 	mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
1495 }
1496 
1497 static int
1498 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1499 			  struct mcp251xfd_rx_ring *ring,
1500 			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1501 {
1502 	struct net_device_stats *stats = &priv->ndev->stats;
1503 	struct sk_buff *skb;
1504 	struct canfd_frame *cfd;
1505 	int err;
1506 
1507 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1508 		skb = alloc_canfd_skb(priv->ndev, &cfd);
1509 	else
1510 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1511 
1512 	if (!skb) {
1513 		stats->rx_dropped++;
1514 		return 0;
1515 	}
1516 
1517 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1518 	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1519 	if (err)
1520 		stats->rx_fifo_errors++;
1521 
1522 	return 0;
1523 }
1524 
1525 static inline int
1526 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1527 		      const struct mcp251xfd_rx_ring *ring,
1528 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1529 		      const u8 offset, const u8 len)
1530 {
1531 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1532 	int err;
1533 
1534 	err = regmap_bulk_read(priv->map_rx,
1535 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1536 			       hw_rx_obj,
1537 			       len * ring->obj_size / val_bytes);
1538 
1539 	return err;
1540 }
1541 
/* Drain one RX ring: repeatedly read the pending objects up to the
 * ring wrap boundary, hand each to the stack, then increment the
 * chip's RX FIFO tail pointer in a single SPI transfer.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* len only covers the contiguous part up to the wrap; the
	 * next loop iteration handles the wrapped remainder.
	 */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		int offset;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}
1592 
/* RX interrupt handler: drain every RX ring, stopping on the first
 * error.
 */
static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_rx_ring *ring;
	int err, n;

	mcp251xfd_for_each_rx_ring(priv, ring, n) {
		err = mcp251xfd_handle_rxif_ring(priv, ring);
		if (err)
			return err;
	}

	return 0;
}
1606 
1607 static struct sk_buff *
1608 mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
1609 			    struct can_frame **cf, u32 *timestamp)
1610 {
1611 	struct sk_buff *skb;
1612 	int err;
1613 
1614 	err = mcp251xfd_get_timestamp(priv, timestamp);
1615 	if (err)
1616 		return NULL;
1617 
1618 	skb = alloc_can_err_skb(priv->ndev, cf);
1619 	if (skb)
1620 		mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
1621 
1622 	return skb;
1623 }
1624 
/* RX overflow interrupt handler: clear the RXOVIF flag of every
 * affected FIFO and send a CAN error frame to the stack.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Clear the overflow flag of this FIFO (write 0 to
		 * the RXOVIF bit, leave all other bits untouched).
		 */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	/* Allocation failure is not fatal; the statistics above have
	 * already been updated.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1676 
/* TX attempt interrupt handler: currently only logs the event, no
 * recovery action is taken.
 */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1683 
/* Invalid message (bus error) interrupt handler: decode the BDIAG1
 * diagnostic register into a CAN error frame, update the error
 * statistics and queue the frame to the stack.
 *
 * Note: cf may be NULL if the skb allocation fails; statistics are
 * still updated in that case, only the error frame is dropped.
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1781 
/* CAN error state interrupt handler: derive the new CAN state from
 * the TREC register, report the state change to the CAN core and, on
 * Bus Off, save the error counters and stop the chip.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	/* Map the TREC status bits to CAN states, worst state first. */
	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	/* Attach the current error counters to the error frame,
	 * unless we just went Bus Off (the chip is powered down then).
	 */
	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1854 
/* Mode change interrupt handler: log the new controller mode and, if
 * the controller dropped out of the configured (normal) mode due to a
 * TX MAB underflow, request Normal mode again - either immediately or
 * deferred to mcp251xfd_handle_eccif() if an ECC error is pending.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1909 
/* System error interrupt handler: classify the SERRIF as a TX MAB
 * underflow or RX MAB overflow based on which other interrupt flags
 * are pending (see the errata references in the comments below), and
 * update the statistics accordingly.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1987 
/* Recover from an ECC error in the TX-RAM: after validating that the
 * corrupted address belongs to the TX object the chip will transmit
 * next, rewrite that TX object over SPI and request Normal mode to
 * trigger the retransmission.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* distance of the corrupted object from the chip's TX tail,
	 * modulo the ring size (obj_num is a power of two)
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2036 
/* ECC error interrupt handler: clear the ECC status flags, log where
 * the error occurred and, for repeated errors in the TX-RAM, trigger
 * the recovery path that rewrites the corrupted TX object.
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Write 0 to the active IF bits ("~ecc_stat") to clear them,
	 * 1 to all others to leave them untouched.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		/* Only attempt recovery after the error has repeated. */
		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Deferred mode change requested by mcp251xfd_handle_modif(). */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2112 
2113 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2114 {
2115 	int err;
2116 	u32 crc;
2117 
2118 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2119 	if (err)
2120 		return err;
2121 
2122 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2123 				 MCP251XFD_REG_CRC_IF_MASK,
2124 				 ~crc);
2125 	if (err)
2126 		return err;
2127 
2128 	if (crc & MCP251XFD_REG_CRC_FERRIF)
2129 		netdev_notice(priv->ndev, "CRC write command format error.\n");
2130 	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2131 		netdev_notice(priv->ndev,
2132 			      "CRC write error detected. CRC=0x%04lx.\n",
2133 			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2134 
2135 	return 0;
2136 }
2137 
/* Invoke mcp251xfd_handle_<irq>() and, if it fails, log an error
 * message containing the handler's name (via __stringify). The
 * statement expression evaluates to the handler's return value, so
 * callers can still check it.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2150 
/* Threaded IRQ handler.
 *
 * If the optional RX-INT GPIO is available, first drain the RX
 * FIFOs for as long as the line stays active — this needs no
 * register reads. Then loop: read the interrupt status registers
 * into priv->regs_status, ACK the clearable flags, and dispatch
 * each pending-and-enabled interrupt to its mcp251xfd_handle_*()
 * handler, until no enabled interrupt is pending. On any handler
 * error the chip state is dumped and interrupts are disabled.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	if (priv->rx_int)
		do {
			int rx_pending;

			/* Loop while the RX interrupt line is active. */
			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Bulk-read the interrupt status registers starting at
		 * MCP251XFD_REG_INT into priv->regs_status.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only consider interrupts that are both flagged (IF)
		 * and enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_dump(priv);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2297 
2298 static inline struct
2299 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2300 {
2301 	u8 tx_head;
2302 
2303 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2304 
2305 	return &tx_ring->obj[tx_head];
2306 }
2307 
/* Convert an skb (Classical CAN or CAN-FD frame) into the
 * controller's hardware TX object format inside tx_obj's
 * preallocated load buffer, and set the length of the first SPI
 * transfer accordingly.
 *
 * seq is placed into the TX object's SEQ field; only the lower
 * bits make it into the TEF object (see comment below).
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	/* Extended (29 bit) vs. standard (11 bit) CAN ID encoding. */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	/* RTR frames carry no data; otherwise round the length up to
	 * the next valid CAN-FD length for optional padding below.
	 */
	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The load buffer layout differs depending on whether a CRC
	 * command is used for the RAM write (MCP251XFD_QUIRK_CRC_TX).
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	/* The hardware expects ID and flags in little endian. */
	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2411 
/* Asynchronously start the prepared SPI message that writes the TX
 * object into the controller's RAM (spi_async() does not block).
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}
2417 
/* Check whether the TX ring has room for another frame.
 *
 * If no TX object is free, stop the queue first and then re-check
 * after a memory barrier: the ring may have been emptied
 * concurrently, in which case the queue is restarted. The
 * stop-then-recheck order avoids a race where a concurrent
 * completion would leave the queue stopped forever.
 *
 * Returns true if the ring is (still) full — the caller should
 * return NETDEV_TX_BUSY — false otherwise.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}
2442 
/* ndo_start_xmit callback.
 *
 * Converts the skb into a hardware TX object at the ring head,
 * queues the echo skb and asynchronously writes the object to the
 * controller via SPI. The queue is stopped here (before the SPI
 * write) if this frame occupies the last free TX object.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	can_put_echo_skb(skb, ndev, tx_head, 0);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	/* NOTE(review): on spi_async() failure the echo skb queued
	 * above is not released here and tx_ring->head stays
	 * advanced — confirm this slot is reclaimed elsewhere (e.g.
	 * on interface down).
	 */
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2480 
/* ndo_open callback: bring the interface up.
 *
 * Resumes the device via runtime PM, opens the candev, allocates
 * the rings, enables the transceiver, starts the chip, enables
 * rx-offload, requests the threaded IRQ and finally enables the
 * controller's interrupts. Error labels unwind the steps acquired
 * so far.
 *
 * NOTE(review): mcp251xfd_chip_stop() in the out_pm_runtime_put
 * path runs on all error paths, even those where the chip was
 * never started — confirm this is intended.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	mcp251xfd_timestamp_init(priv);
	can_rx_offload_enable(&priv->offload);

	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2543 
/* ndo_stop callback: tear down the interface in reverse order of
 * mcp251xfd_open() and drop the runtime PM reference.
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2562 
/* Network device callbacks. */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2569 
2570 static void
2571 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2572 {
2573 	const struct spi_device *spi = priv->spi;
2574 	const struct spi_controller *ctlr = spi->controller;
2575 
2576 	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2577 		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2578 }
2579 
/* Autodetect the chip model.
 *
 * The OSC_LPMEN bit is only implemented on the MCP2518FD: try to
 * set it and read it back. If it sticks, this is a MCP2518FD,
 * otherwise a MCP2517FD. priv->devtype_data is overwritten with
 * the detected model's data (with an info message if firmware/DT
 * specified a different concrete model), the SPI-controller quirks
 * are re-applied and the regmap is re-initialized for the detected
 * model.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Only warn if a concrete model (not the generic mcp251xfd)
	 * was specified and it disagrees with the detection.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2620 
/* Sanity check the optional RX-INT GPIO.
 *
 * After a softreset the RX_INT line must be inactive. If it reads
 * active anyway, the GPIO is not usable (e.g. miswired); in that
 * case release it and clear priv->rx_int so the driver continues
 * without RX_INT support instead of failing.
 */
static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
{
	int err, rx_pending;

	if (!priv->rx_int)
		return 0;

	err = mcp251xfd_chip_rx_int_enable(priv);
	if (err)
		return err;

	/* Check if RX_INT is properly working. The RX_INT should not
	 * be active after a softreset.
	 */
	rx_pending = gpiod_get_value_cansleep(priv->rx_int);

	err = mcp251xfd_chip_rx_int_disable(priv);
	if (err)
		return err;

	if (!rx_pending)
		return 0;

	netdev_info(priv->ndev,
		    "RX_INT active after softreset, disabling RX_INT support.");
	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
	priv->rx_int = NULL;

	return 0;
}
2651 
2652 static int
2653 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2654 			      u32 *dev_id, u32 *effective_speed_hz)
2655 {
2656 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2657 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2658 	struct spi_transfer xfer[2] = { };
2659 	int err;
2660 
2661 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2662 	if (!buf_rx)
2663 		return -ENOMEM;
2664 
2665 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2666 	if (!buf_tx) {
2667 		err = -ENOMEM;
2668 		goto out_kfree_buf_rx;
2669 	}
2670 
2671 	xfer[0].tx_buf = buf_tx;
2672 	xfer[0].len = sizeof(buf_tx->cmd);
2673 	xfer[1].rx_buf = buf_rx->data;
2674 	xfer[1].len = sizeof(dev_id);
2675 
2676 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2677 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2678 	if (err)
2679 		goto out_kfree_buf_tx;
2680 
2681 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2682 	*effective_speed_hz = xfer->effective_speed_hz;
2683 
2684  out_kfree_buf_tx:
2685 	kfree(buf_tx);
2686  out_kfree_buf_rx:
2687 	kfree(buf_rx);
2688 
2689 	return 0;
2690 }
2691 
/* Expand to '+' if the given quirk is set in the surrounding
 * scope's priv->devtype_data.quirks, '-' otherwise. Used for the
 * summary message in mcp251xfd_register_done().
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2694 
/* Final registration step: read the DEVID register and print a
 * one-line summary (model, silicon revision, active quirks and the
 * configured/effective clock and SPI speeds).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2729 
/* Power up the chip, autodetect the exact model, register the CAN
 * netdev, print the summary line, and finally put the controller
 * into sleep mode again.
 *
 * On success pm_runtime_put() lets runtime PM disable the clocks
 * and vdd (if CONFIG_PM is enabled); on failure the acquired
 * resources are unwound via the labels below.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means no chip answered at all; skip the sleep-mode
	 * step in that case.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2793 
/* Reverse of mcp251xfd_register(): unregister the candev, balance
 * the runtime PM state and switch clocks and vdd off.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2805 
/* Device tree match table; .data selects the per-model quirks. */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2821 
/* SPI device ID table for non-DT instantiation; driver_data selects
 * the per-model quirks.
 */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2837 
2838 static int mcp251xfd_probe(struct spi_device *spi)
2839 {
2840 	const void *match;
2841 	struct net_device *ndev;
2842 	struct mcp251xfd_priv *priv;
2843 	struct gpio_desc *rx_int;
2844 	struct regulator *reg_vdd, *reg_xceiver;
2845 	struct clk *clk;
2846 	u32 freq;
2847 	int err;
2848 
2849 	if (!spi->irq)
2850 		return dev_err_probe(&spi->dev, -ENXIO,
2851 				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
2852 
2853 	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2854 					 GPIOD_IN);
2855 	if (IS_ERR(rx_int))
2856 		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
2857 				     "Failed to get RX-INT!\n");
2858 
2859 	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2860 	if (PTR_ERR(reg_vdd) == -ENODEV)
2861 		reg_vdd = NULL;
2862 	else if (IS_ERR(reg_vdd))
2863 		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
2864 				     "Failed to get VDD regulator!\n");
2865 
2866 	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2867 	if (PTR_ERR(reg_xceiver) == -ENODEV)
2868 		reg_xceiver = NULL;
2869 	else if (IS_ERR(reg_xceiver))
2870 		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
2871 				     "Failed to get Transceiver regulator!\n");
2872 
2873 	clk = devm_clk_get(&spi->dev, NULL);
2874 	if (IS_ERR(clk))
2875 		dev_err_probe(&spi->dev, PTR_ERR(clk),
2876 			      "Failed to get Oscillator (clock)!\n");
2877 	freq = clk_get_rate(clk);
2878 
2879 	/* Sanity check */
2880 	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2881 	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2882 		dev_err(&spi->dev,
2883 			"Oscillator frequency (%u Hz) is too low or high.\n",
2884 			freq);
2885 		return -ERANGE;
2886 	}
2887 
2888 	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2889 		dev_err(&spi->dev,
2890 			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2891 			freq);
2892 		return -ERANGE;
2893 	}
2894 
2895 	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2896 			    MCP251XFD_TX_OBJ_NUM_MAX);
2897 	if (!ndev)
2898 		return -ENOMEM;
2899 
2900 	SET_NETDEV_DEV(ndev, &spi->dev);
2901 
2902 	ndev->netdev_ops = &mcp251xfd_netdev_ops;
2903 	ndev->irq = spi->irq;
2904 	ndev->flags |= IFF_ECHO;
2905 
2906 	priv = netdev_priv(ndev);
2907 	spi_set_drvdata(spi, priv);
2908 	priv->can.clock.freq = freq;
2909 	priv->can.do_set_mode = mcp251xfd_set_mode;
2910 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2911 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2912 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2913 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
2914 		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
2915 		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
2916 		CAN_CTRLMODE_CC_LEN8_DLC;
2917 	priv->ndev = ndev;
2918 	priv->spi = spi;
2919 	priv->rx_int = rx_int;
2920 	priv->clk = clk;
2921 	priv->reg_vdd = reg_vdd;
2922 	priv->reg_xceiver = reg_xceiver;
2923 
2924 	match = device_get_match_data(&spi->dev);
2925 	if (match)
2926 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2927 	else
2928 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2929 			spi_get_device_id(spi)->driver_data;
2930 
2931 	/* Errata Reference:
2932 	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
2933 	 *
2934 	 * The SPI can write corrupted data to the RAM at fast SPI
2935 	 * speeds:
2936 	 *
2937 	 * Simultaneous activity on the CAN bus while writing data to
2938 	 * RAM via the SPI interface, with high SCK frequency, can
2939 	 * lead to corrupted data being written to RAM.
2940 	 *
2941 	 * Fix/Work Around:
2942 	 * Ensure that FSCK is less than or equal to 0.85 *
2943 	 * (FSYSCLK/2).
2944 	 *
2945 	 * Known good combinations are:
2946 	 *
2947 	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	config
2948 	 *
2949 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2950 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2951 	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2952 	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2953 	 * 2518	40 MHz	fsl,imx6dl		fsl,imx51-ecspi		15000000 Hz	 75.00%	 30000000 Hz	default
2954 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2955 	 *
2956 	 */
2957 	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2958 	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2959 	spi->bits_per_word = 8;
2960 	spi->rt = true;
2961 	err = spi_setup(spi);
2962 	if (err)
2963 		goto out_free_candev;
2964 
2965 	err = mcp251xfd_regmap_init(priv);
2966 	if (err)
2967 		goto out_free_candev;
2968 
2969 	err = can_rx_offload_add_manual(ndev, &priv->offload,
2970 					MCP251XFD_NAPI_WEIGHT);
2971 	if (err)
2972 		goto out_free_candev;
2973 
2974 	err = mcp251xfd_register(priv);
2975 	if (err)
2976 		goto out_free_candev;
2977 
2978 	return 0;
2979 
2980  out_free_candev:
2981 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
2982 
2983 	free_candev(ndev);
2984 
2985 	return err;
2986 }
2987 
/* SPI remove callback: undo everything done in mcp251xfd_probe(),
 * restoring the SPI device's original max speed before freeing the
 * candev.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
3000 
/* Runtime PM suspend: switch the clocks and regulators off. */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}
3007 
/* Runtime PM resume: switch the clocks and regulators back on. */
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}
3014 
/* Only runtime PM callbacks are provided here. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
3019 
/* SPI driver glue and module boilerplate. */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
MODULE_LICENSE("GPL v2");
3035