1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020, 2021 Pengutronix,
6 //               Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/pm_runtime.h>
22 
23 #include <asm/unaligned.h>
24 
25 #include "mcp251xfd.h"
26 
27 #define DEVICE_NAME "mcp251xfd"
28 
/* MCP2517FD: CRC protection on register, RX and TX SPI transfers, RAM
 * ECC, plus the MAB_NO_WARN quirk (presumably suppresses MAB related
 * warnings on this model -- see the quirk definition in mcp251xfd.h).
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
35 
/* MCP2518FD: same CRC and ECC quirks as the MCP2517FD, but without
 * MAB_NO_WARN.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
41 
/* Autodetect model, start with CRC enabled. The concrete model is
 * presumably narrowed down later, once the chip can be queried --
 * not visible in this part of the file.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
48 
/* Bit timing limits for the nominal (arbitration) bit rate, as
 * advertised to the CAN core.
 */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
60 
/* Bit timing limits for the CAN-FD data bit rate. Note the much
 * smaller segment ranges compared to the nominal bit timing.
 */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
72 
73 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
74 {
75 	switch (model) {
76 	case MCP251XFD_MODEL_MCP2517FD:
77 		return "MCP2517FD";
78 	case MCP251XFD_MODEL_MCP2518FD:
79 		return "MCP2518FD";
80 	case MCP251XFD_MODEL_MCP251XFD:
81 		return "MCP251xFD";
82 	}
83 
84 	return "<unknown>";
85 }
86 
87 static inline const char *
88 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
89 {
90 	return __mcp251xfd_get_model_str(priv->devtype_data.model);
91 }
92 
93 static const char *mcp251xfd_get_mode_str(const u8 mode)
94 {
95 	switch (mode) {
96 	case MCP251XFD_REG_CON_MODE_MIXED:
97 		return "Mixed (CAN FD/CAN 2.0)";
98 	case MCP251XFD_REG_CON_MODE_SLEEP:
99 		return "Sleep";
100 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
101 		return "Internal Loopback";
102 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
103 		return "Listen Only";
104 	case MCP251XFD_REG_CON_MODE_CONFIG:
105 		return "Configuration";
106 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
107 		return "External Loopback";
108 	case MCP251XFD_REG_CON_MODE_CAN2_0:
109 		return "CAN 2.0";
110 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
111 		return "Restricted Operation";
112 	}
113 
114 	return "<unknown>";
115 }
116 
117 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
118 {
119 	if (!priv->reg_vdd)
120 		return 0;
121 
122 	return regulator_enable(priv->reg_vdd);
123 }
124 
125 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
126 {
127 	if (!priv->reg_vdd)
128 		return 0;
129 
130 	return regulator_disable(priv->reg_vdd);
131 }
132 
133 static inline int
134 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
135 {
136 	if (!priv->reg_xceiver)
137 		return 0;
138 
139 	return regulator_enable(priv->reg_xceiver);
140 }
141 
142 static inline int
143 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
144 {
145 	if (!priv->reg_xceiver)
146 		return 0;
147 
148 	return regulator_disable(priv->reg_xceiver);
149 }
150 
151 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
152 {
153 	int err;
154 
155 	err = clk_prepare_enable(priv->clk);
156 	if (err)
157 		return err;
158 
159 	err = mcp251xfd_vdd_enable(priv);
160 	if (err)
161 		clk_disable_unprepare(priv->clk);
162 
163 	/* Wait for oscillator stabilisation time after power up */
164 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
165 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
166 
167 	return err;
168 }
169 
170 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
171 {
172 	int err;
173 
174 	err = mcp251xfd_vdd_disable(priv);
175 	if (err)
176 		return err;
177 
178 	clk_disable_unprepare(priv->clk);
179 
180 	return 0;
181 }
182 
/* Prepare a SPI write command buffer that updates register @reg with
 * @val, restricted to the bytes covered by @mask: the command
 * addresses the first byte set in @mask and the payload spans up to
 * the last set byte.
 *
 * When the MCP251XFD_QUIRK_CRC_REG quirk is active, a big-endian
 * CRC-16 over command + payload is appended.
 *
 * Returns the total number of bytes to transfer (command + payload
 * [+ CRC]).
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	/* Shift the value so that its first masked byte is transferred
	 * first; the chip takes the register content little endian.
	 */
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
218 
219 static inline int
220 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
221 				 u8 *tef_tail)
222 {
223 	u32 tef_ua;
224 	int err;
225 
226 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
227 	if (err)
228 		return err;
229 
230 	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
231 
232 	return 0;
233 }
234 
235 static inline int
236 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
237 				u8 *tx_tail)
238 {
239 	u32 fifo_sta;
240 	int err;
241 
242 	err = regmap_read(priv->map_reg,
243 			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
244 			  &fifo_sta);
245 	if (err)
246 		return err;
247 
248 	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
249 
250 	return 0;
251 }
252 
253 static inline int
254 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
255 				const struct mcp251xfd_rx_ring *ring,
256 				u8 *rx_head)
257 {
258 	u32 fifo_sta;
259 	int err;
260 
261 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
262 			  &fifo_sta);
263 	if (err)
264 		return err;
265 
266 	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
267 
268 	return 0;
269 }
270 
271 static inline int
272 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
273 				const struct mcp251xfd_rx_ring *ring,
274 				u8 *rx_tail)
275 {
276 	u32 fifo_ua;
277 	int err;
278 
279 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
280 			  &fifo_ua);
281 	if (err)
282 		return err;
283 
284 	fifo_ua -= ring->base - MCP251XFD_RAM_START;
285 	*rx_tail = fifo_ua / ring->obj_size;
286 
287 	return 0;
288 }
289 
290 static void
291 mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
292 			      const struct mcp251xfd_tx_ring *ring,
293 			      struct mcp251xfd_tx_obj *tx_obj,
294 			      const u8 rts_buf_len,
295 			      const u8 n)
296 {
297 	struct spi_transfer *xfer;
298 	u16 addr;
299 
300 	/* FIFO load */
301 	addr = mcp251xfd_get_tx_obj_addr(ring, n);
302 	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
303 		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
304 						     addr);
305 	else
306 		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
307 					      addr);
308 
309 	xfer = &tx_obj->xfer[0];
310 	xfer->tx_buf = &tx_obj->buf;
311 	xfer->len = 0;	/* actual len is assigned on the fly */
312 	xfer->cs_change = 1;
313 	xfer->cs_change_delay.value = 0;
314 	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
315 
316 	/* FIFO request to send */
317 	xfer = &tx_obj->xfer[1];
318 	xfer->tx_buf = &ring->rts_buf;
319 	xfer->len = rts_buf_len;
320 
321 	/* SPI message */
322 	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
323 					ARRAY_SIZE(tx_obj->xfer));
324 }
325 
/* Initialize the TEF, TX and RX rings: reset all head/tail pointers,
 * lay the rings out back-to-back in the controller's RAM (TEF objects,
 * then TX objects, then the RX rings) and pre-build the SPI messages
 * used to increment the FIFO tail pointers (UINC).
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	netdev_reset_queue(priv->ndev);

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	/* All UINC transfers share the same pre-computed buffer. */
	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as
	 * data. Set "cs_change" of the last transfer to "0" to
	 * properly deactivate the chip select at the end of the
	 * message.
	 */
	xfer->cs_change = 0;

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects are placed directly after the TEF objects in RAM. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* The first RX ring starts after the TX objects, each
		 * further ring directly after its predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Set "cs_change"
		 * of the last transfer to "0" to properly deactivate
		 * the chip select at the end of the message.
		 */
		xfer->cs_change = 0;
	}
}
427 
428 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
429 {
430 	int i;
431 
432 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
433 		kfree(priv->rx[i]);
434 		priv->rx[i] = NULL;
435 	}
436 }
437 
/* Size the TEF/TX/RX rings and allocate the RX ring structures.
 *
 * The object sizes depend on whether the payload is CAN-FD sized
 * (64 byte) or Classical CAN sized (8 byte). Whatever controller RAM
 * is left after the TEF and TX objects is handed out to RX rings,
 * each sized to a power-of-two number of objects.
 *
 * Returns 0 on success or -ENOMEM (with all partially allocated
 * rings freed again).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* Each TX object is paired with one TEF object. */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Round the number of objects that still fit down to
		 * the next power of two, capped at the maximum.
		 */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
506 
507 static inline int
508 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
509 {
510 	u32 val;
511 	int err;
512 
513 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
514 	if (err)
515 		return err;
516 
517 	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
518 
519 	return 0;
520 }
521 
/* Request operation mode @mode_req via the CON register's REQOP field
 * and, unless @nowait is set, poll OPMOD until the chip has actually
 * entered that mode.
 *
 * Sleep Mode is never waited for. Returns 0 on success, a regmap
 * error, or the poll timeout error if the mode was not reached.
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	/* "con" is updated by the poll macro on every read, so on
	 * timeout it holds the last value seen.
	 */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
555 
556 static inline int
557 mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
558 			const u8 mode_req)
559 {
560 	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
561 }
562 
563 static inline int __maybe_unused
564 mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
565 			       const u8 mode_req)
566 {
567 	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
568 }
569 
570 static inline bool mcp251xfd_osc_invalid(u32 reg)
571 {
572 	return reg == 0x0 || reg == 0xffffffff;
573 }
574 
/* Wake the chip's oscillator and wait for it to become ready.
 *
 * Note the error handling order below: the poll result is examined
 * only after the all-0/all-1 check, so a missing/unresponsive chip is
 * reported as -ENODEV instead of -ETIMEDOUT.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	}

	return err;
}
617 
618 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
619 {
620 	const __be16 cmd = mcp251xfd_cmd_reset();
621 	int err;
622 
623 	/* The Set Mode and SPI Reset command only seems to works if
624 	 * the controller is not in Sleep Mode.
625 	 */
626 	err = mcp251xfd_chip_clock_enable(priv);
627 	if (err)
628 		return err;
629 
630 	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
631 	if (err)
632 		return err;
633 
634 	/* spi_write_then_read() works with non DMA-safe buffers */
635 	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
636 }
637 
638 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
639 {
640 	u32 osc, osc_reference;
641 	u8 mode;
642 	int err;
643 
644 	err = mcp251xfd_chip_get_mode(priv, &mode);
645 	if (err)
646 		return err;
647 
648 	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
649 		netdev_info(priv->ndev,
650 			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
651 			    mcp251xfd_get_mode_str(mode), mode);
652 		return -ETIMEDOUT;
653 	}
654 
655 	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
656 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
657 			   MCP251XFD_REG_OSC_CLKODIV_10);
658 
659 	/* check reset defaults of OSC reg */
660 	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
661 	if (err)
662 		return err;
663 
664 	if (osc != osc_reference) {
665 		netdev_info(priv->ndev,
666 			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
667 			    osc, osc_reference);
668 		return -ETIMEDOUT;
669 	}
670 
671 	return 0;
672 }
673 
674 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
675 {
676 	int err, i;
677 
678 	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
679 		if (i)
680 			netdev_info(priv->ndev,
681 				    "Retrying to reset controller.\n");
682 
683 		err = mcp251xfd_chip_softreset_do(priv);
684 		if (err == -ETIMEDOUT)
685 			continue;
686 		if (err)
687 			return err;
688 
689 		err = mcp251xfd_chip_softreset_check(priv);
690 		if (err == -ETIMEDOUT)
691 			continue;
692 		if (err)
693 			return err;
694 
695 		return 0;
696 	}
697 
698 	return err;
699 }
700 
701 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
702 {
703 	u32 osc;
704 	int err;
705 
706 	/* Activate Low Power Mode on Oscillator Disable. This only
707 	 * works on the MCP2518FD. The MCP2517FD will go into normal
708 	 * Sleep Mode instead.
709 	 */
710 	osc = MCP251XFD_REG_OSC_LPMEN |
711 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
712 			   MCP251XFD_REG_OSC_CLKODIV_10);
713 	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
714 	if (err)
715 		return err;
716 
717 	/* Set Time Base Counter Prescaler to 1.
718 	 *
719 	 * This means an overflow of the 32 bit Time Base Counter
720 	 * register at 40 MHz every 107 seconds.
721 	 */
722 	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
723 			    MCP251XFD_REG_TSCON_TBCEN);
724 }
725 
/* Program the CAN Control register, the nominal bit timing, and (in
 * FD mode) the data bit timing and Transmitter Delay Compensation,
 * from the timing parameters computed by the CAN core.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only disable it for FD non-ISO mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time -- register fields hold "value - 1" */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	/* Data bit timing and TDC only apply in CAN-FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation
	 *
	 * NOTE(review): brp * (prop_seg + phase_seg1) is always
	 * positive, so the lower clamp bound of -64 can never trigger
	 * -- effectively this is min(..., 63). Confirm against the
	 * datasheet's TDCO field range.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
803 
804 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
805 {
806 	u32 val;
807 
808 	if (!priv->rx_int)
809 		return 0;
810 
811 	/* Configure GPIOs:
812 	 * - PIN0: GPIO Input
813 	 * - PIN1: GPIO Input/RX Interrupt
814 	 *
815 	 * PIN1 must be Input, otherwise there is a glitch on the
816 	 * rx-INT line. It happens between setting the PIN as output
817 	 * (in the first byte of the SPI transfer) and configuring the
818 	 * PIN as interrupt (in the last byte of the SPI transfer).
819 	 */
820 	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
821 		MCP251XFD_REG_IOCON_TRIS0;
822 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
823 }
824 
825 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
826 {
827 	u32 val;
828 
829 	if (!priv->rx_int)
830 		return 0;
831 
832 	/* Configure GPIOs:
833 	 * - PIN0: GPIO Input
834 	 * - PIN1: GPIO Input
835 	 */
836 	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
837 		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
838 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
839 }
840 
841 static int
842 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
843 				const struct mcp251xfd_rx_ring *ring)
844 {
845 	u32 fifo_con;
846 
847 	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
848 	 *
849 	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
850 	 * generate a RXOVIF, use this to properly detect RX MAB
851 	 * overflows.
852 	 */
853 	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
854 			      ring->obj_num - 1) |
855 		MCP251XFD_REG_FIFOCON_RXTSEN |
856 		MCP251XFD_REG_FIFOCON_RXOVIE |
857 		MCP251XFD_REG_FIFOCON_TFNRFNIE;
858 
859 	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
860 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
861 				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
862 	else
863 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
864 				       MCP251XFD_REG_FIFOCON_PLSIZE_8);
865 
866 	return regmap_write(priv->map_reg,
867 			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
868 }
869 
870 static int
871 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
872 				  const struct mcp251xfd_rx_ring *ring)
873 {
874 	u32 fltcon;
875 
876 	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
877 		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
878 
879 	return regmap_update_bits(priv->map_reg,
880 				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
881 				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
882 				  fltcon);
883 }
884 
/* Program the chip's TEF, the TX FIFO and all RX FIFOs (including
 * their acceptance filters) according to the previously allocated
 * ring layout.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF: one event per TX object, with timestamps, overflow and
	 * not-empty interrupts. FSIZE fields hold "num - 1".
	 */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* Payload size: 64 byte in FD (and listen-only, which is set
	 * up like FD), 8 byte for Classical CAN.
	 */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* Retransmission attempts: one-shot vs. unlimited. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
942 
943 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
944 {
945 	struct mcp251xfd_ecc *ecc = &priv->ecc;
946 	void *ram;
947 	u32 val = 0;
948 	int err;
949 
950 	ecc->ecc_stat = 0;
951 
952 	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
953 		val = MCP251XFD_REG_ECCCON_ECCEN;
954 
955 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
956 				 MCP251XFD_REG_ECCCON_ECCEN, val);
957 	if (err)
958 		return err;
959 
960 	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
961 	if (!ram)
962 		return -ENOMEM;
963 
964 	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
965 			       MCP251XFD_RAM_SIZE);
966 	kfree(ram);
967 
968 	return err;
969 }
970 
971 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
972 {
973 	struct mcp251xfd_ecc *ecc = &priv->ecc;
974 
975 	ecc->ecc_stat = 0;
976 }
977 
978 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
979 {
980 	u8 mode;
981 
982 
983 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
984 		mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
985 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
986 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
987 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
988 		mode = MCP251XFD_REG_CON_MODE_MIXED;
989 	else
990 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
991 
992 	return mode;
993 }
994 
995 static int
996 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
997 				 bool nowait)
998 {
999 	u8 mode;
1000 
1001 	mode = mcp251xfd_get_normal_mode(priv);
1002 
1003 	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
1004 }
1005 
1006 static inline int
1007 mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
1008 {
1009 	return __mcp251xfd_chip_set_normal_mode(priv, false);
1010 }
1011 
1012 static inline int
1013 mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
1014 {
1015 	return __mcp251xfd_chip_set_normal_mode(priv, true);
1016 }
1017 
/* Enable all interrupt sources used by the driver: SPI CRC errors,
 * ECC errors, and the main interrupt enable bits. IVMIE is only
 * enabled when bus error reporting was requested.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	/* CRC full-command-format and CRC mismatch error interrupts */
	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	/* ECC double- and single-error interrupts */
	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
1048 
1049 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
1050 {
1051 	int err;
1052 	u32 mask;
1053 
1054 	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1055 	if (err)
1056 		return err;
1057 
1058 	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1059 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1060 				 mask, 0x0);
1061 	if (err)
1062 		return err;
1063 
1064 	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
1065 }
1066 
/* Stop the chip: record the new CAN state, quiesce the interrupt
 * sources and put the controller into Sleep Mode.
 *
 * NOTE(review): the return values of the two disable calls are
 * deliberately ignored -- the chip is being stopped regardless.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1076 
1077 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1078 {
1079 	int err;
1080 
1081 	err = mcp251xfd_chip_softreset(priv);
1082 	if (err)
1083 		goto out_chip_stop;
1084 
1085 	err = mcp251xfd_chip_clock_init(priv);
1086 	if (err)
1087 		goto out_chip_stop;
1088 
1089 	err = mcp251xfd_set_bittiming(priv);
1090 	if (err)
1091 		goto out_chip_stop;
1092 
1093 	err = mcp251xfd_chip_rx_int_enable(priv);
1094 	if (err)
1095 		return err;
1096 
1097 	err = mcp251xfd_chip_ecc_init(priv);
1098 	if (err)
1099 		goto out_chip_stop;
1100 
1101 	mcp251xfd_ring_init(priv);
1102 
1103 	err = mcp251xfd_chip_fifo_init(priv);
1104 	if (err)
1105 		goto out_chip_stop;
1106 
1107 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1108 
1109 	err = mcp251xfd_chip_set_normal_mode(priv);
1110 	if (err)
1111 		goto out_chip_stop;
1112 
1113 	return 0;
1114 
1115  out_chip_stop:
1116 	mcp251xfd_dump(priv);
1117 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1118 
1119 	return err;
1120 }
1121 
1122 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1123 {
1124 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
1125 	int err;
1126 
1127 	switch (mode) {
1128 	case CAN_MODE_START:
1129 		err = mcp251xfd_chip_start(priv);
1130 		if (err)
1131 			return err;
1132 
1133 		err = mcp251xfd_chip_interrupts_enable(priv);
1134 		if (err) {
1135 			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1136 			return err;
1137 		}
1138 
1139 		netif_wake_queue(ndev);
1140 		break;
1141 
1142 	default:
1143 		return -EOPNOTSUPP;
1144 	}
1145 
1146 	return 0;
1147 }
1148 
1149 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1150 					struct can_berr_counter *bec)
1151 {
1152 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1153 	u32 trec;
1154 	int err;
1155 
1156 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1157 	if (err)
1158 		return err;
1159 
1160 	if (trec & MCP251XFD_REG_TREC_TXBO)
1161 		bec->txerr = 256;
1162 	else
1163 		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1164 	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1165 
1166 	return 0;
1167 }
1168 
1169 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1170 				      struct can_berr_counter *bec)
1171 {
1172 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1173 
1174 	/* Avoid waking up the controller if the interface is down */
1175 	if (!(ndev->flags & IFF_UP))
1176 		return 0;
1177 
1178 	/* The controller is powered down during Bus Off, use saved
1179 	 * bec values.
1180 	 */
1181 	if (priv->can.state == CAN_STATE_BUS_OFF) {
1182 		*bec = priv->bec;
1183 		return 0;
1184 	}
1185 
1186 	return __mcp251xfd_get_berr_counter(ndev, bec);
1187 }
1188 
1189 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1190 {
1191 	u8 tef_tail_chip, tef_tail;
1192 	int err;
1193 
1194 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1195 		return 0;
1196 
1197 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1198 	if (err)
1199 		return err;
1200 
1201 	tef_tail = mcp251xfd_get_tef_tail(priv);
1202 	if (tef_tail_chip != tef_tail) {
1203 		netdev_err(priv->ndev,
1204 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1205 			   tef_tail_chip, tef_tail);
1206 		return -EILSEQ;
1207 	}
1208 
1209 	return 0;
1210 }
1211 
1212 static int
1213 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1214 			const struct mcp251xfd_rx_ring *ring)
1215 {
1216 	u8 rx_tail_chip, rx_tail;
1217 	int err;
1218 
1219 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1220 		return 0;
1221 
1222 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1223 	if (err)
1224 		return err;
1225 
1226 	rx_tail = mcp251xfd_get_rx_tail(ring);
1227 	if (rx_tail_chip != rx_tail) {
1228 		netdev_err(priv->ndev,
1229 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1230 			   rx_tail_chip, rx_tail);
1231 		return -EILSEQ;
1232 	}
1233 
1234 	return 0;
1235 }
1236 
/* Called when the Sequence Number of a TEF object doesn't match our
 * tef_tail. Reads the TEF status register to distinguish a fatal TEF
 * overflow (-ENOBUFS) from reading a not-yet-completed TEF object too
 * early (-EAGAIN, the IRQ handler will retry).
 */
static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	u32 tef_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
	if (err)
		return err;

	/* A TEF overflow means TEF objects were lost - give up. */
	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
		netdev_err(priv->ndev,
			   "Transmit Event FIFO buffer overflow.\n");
		return -ENOBUFS;
	}

	netdev_info(priv->ndev,
		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
		    "not empty" : "empty",
		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);

	/* The Sequence Number in the TEF doesn't match our tef_tail. */
	return -EAGAIN;
}
1264 
/* Process a single TEF (Transmit Event FIFO) object: verify its
 * Sequence Number against our tef_tail, timestamp and complete the
 * corresponding echo skb, update TX statistics and advance tef_tail.
 *
 * Returns the result of the recover path (typically -EAGAIN) if the
 * Sequence Number doesn't match, i.e. the object was read too early.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
			   unsigned int *frame_len_ptr)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	u32 seq, seq_masked, tef_tail_masked, tef_tail;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	tef_tail = mcp251xfd_get_tef_tail(priv);
	skb = priv->can.echo_skb[tef_tail];
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    tef_tail, hw_tef_obj->ts,
					    frame_len_ptr);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}
1301 
/* Advance the TEF head based on the chip's TX FIFO tail.
 *
 * The chip doesn't expose a TEF head pointer directly; instead the
 * hardware TX tail (the next TX object the chip will send) implies
 * how many TX objects have completed and thus how far the TEF head
 * has advanced.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1325 
1326 static inline int
1327 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1328 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1329 		       const u8 offset, const u8 len)
1330 {
1331 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1332 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1333 
1334 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1335 	    (offset > tx_ring->obj_num ||
1336 	     len > tx_ring->obj_num ||
1337 	     offset + len > tx_ring->obj_num)) {
1338 		netdev_err(priv->ndev,
1339 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1340 			   tx_ring->obj_num, offset, len);
1341 		return -ERANGE;
1342 	}
1343 
1344 	return regmap_bulk_read(priv->map_rx,
1345 				mcp251xfd_get_tef_obj_addr(offset),
1346 				hw_tef_obj,
1347 				sizeof(*hw_tef_obj) / val_bytes * len);
1348 }
1349 
/* TEF interrupt handler: read all pending TEF objects (in up to two
 * chunks if the ring wraps), complete the corresponding TX skbs,
 * increment the chip's TEF FIFO tail pointer and wake the TX queue
 * if TX space became available.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	unsigned int total_frame_len = 0;
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	/* If the pending TEF objects wrap around the ring, read the
	 * remainder from the start of the ring.
	 */
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		unsigned int frame_len = 0;

		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;

		total_frame_len += frame_len;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		int offset;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		tx_ring->tail += len;
		netdev_completed_queue(priv->ndev, len, total_frame_len);

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1433 
/* Advance the RX ring head based on the chip's RX FIFO head. */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1457 
/* Convert a hardware RX object into a CAN(-FD) frame inside the skb.
 *
 * Decodes the CAN-ID (standard or extended), the DLC and the CAN-FD
 * flags (ESI, BRS), copies the payload for data frames and sets the
 * RX timestamp on the skb.
 */
static void
mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
			   struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 dlc;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
		u32 sid, eid;

		/* Extended frame: recombine SID and EID into the 29 bit ID. */
		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);

		cfd->can_id = CAN_EFF_FLAG |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
	} else {
		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
					hw_rx_obj->id);
	}

	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);

	/* CANFD */
	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
			cfd->flags |= CANFD_ESI;

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
			cfd->flags |= CANFD_BRS;

		cfd->len = can_fd_dlc2len(dlc);
	} else {
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
			cfd->can_id |= CAN_RTR_FLAG;

		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
				     priv->can.ctrlmode);
	}

	/* RTR frames carry no payload. */
	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
		memcpy(cfd->data, hw_rx_obj->data, cfd->len);

	mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
1505 
1506 static int
1507 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1508 			  struct mcp251xfd_rx_ring *ring,
1509 			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1510 {
1511 	struct net_device_stats *stats = &priv->ndev->stats;
1512 	struct sk_buff *skb;
1513 	struct canfd_frame *cfd;
1514 	int err;
1515 
1516 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1517 		skb = alloc_canfd_skb(priv->ndev, &cfd);
1518 	else
1519 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1520 
1521 	if (!skb) {
1522 		stats->rx_dropped++;
1523 		return 0;
1524 	}
1525 
1526 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1527 	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1528 	if (err)
1529 		stats->rx_fifo_errors++;
1530 
1531 	return 0;
1532 }
1533 
1534 static inline int
1535 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1536 		      const struct mcp251xfd_rx_ring *ring,
1537 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1538 		      const u8 offset, const u8 len)
1539 {
1540 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1541 	int err;
1542 
1543 	err = regmap_bulk_read(priv->map_rx,
1544 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1545 			       hw_rx_obj,
1546 			       len * ring->obj_size / val_bytes);
1547 
1548 	return err;
1549 }
1550 
/* Process one RX ring: read the pending RX objects from the chip in
 * contiguous chunks, convert each into an skb and queue it, then
 * increment the chip's RX FIFO tail pointer.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* Loop until no contiguous (non-wrapping) chunk is left. */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		int offset;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}
1601 
1602 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1603 {
1604 	struct mcp251xfd_rx_ring *ring;
1605 	int err, n;
1606 
1607 	mcp251xfd_for_each_rx_ring(priv, ring, n) {
1608 		err = mcp251xfd_handle_rxif_ring(priv, ring);
1609 		if (err)
1610 			return err;
1611 	}
1612 
1613 	return 0;
1614 }
1615 
1616 static struct sk_buff *
1617 mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
1618 			    struct can_frame **cf, u32 *timestamp)
1619 {
1620 	struct sk_buff *skb;
1621 	int err;
1622 
1623 	err = mcp251xfd_get_timestamp(priv, timestamp);
1624 	if (err)
1625 		return NULL;
1626 
1627 	skb = alloc_can_err_skb(priv->ndev, cf);
1628 	if (skb)
1629 		mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
1630 
1631 	return skb;
1632 }
1633 
/* RX overflow interrupt: account the overflow in the statistics,
 * clear the RXOVIF flag of each affected FIFO and queue an error
 * frame to notify userspace.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Write 0 to clear the RXOVIF flag of this FIFO. */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	/* skb allocation failure is not fatal, the statistics above
	 * have already been updated.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1685 
/* TX attempt interrupt: no recovery action is implemented here, the
 * interrupt is only logged.
 */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1692 
/* Invalid Message (bus error) interrupt: read and clear BDIAG1,
 * translate the individual diagnostic bits into CAN error frame
 * fields, update the RX/TX error statistics and queue an error skb
 * (if one could be allocated) to userspace.
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	/* cf stays NULL on allocation failure; the statistics below
	 * are still updated in that case.
	 */
	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1790 
/* CAN error interrupt: derive the new CAN state from the TREC
 * register, report a state change to the CAN core and - on Bus Off -
 * save the error counters and stop the chip.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	/* Map the TREC status bits to the TX-side CAN state. */
	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	/* ... and to the RX-side CAN state (no Bus Off on RX). */
	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		/* NOTE(review): returning here leaves the error skb
		 * allocated above unfreed - looks like a potential
		 * leak; confirm and free the skb if so.
		 */
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	/* Outside of Bus Off, add the current error counters to the
	 * error frame.
	 */
	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1863 
/* Mode change interrupt: check whether the controller is in the
 * expected (normal) mode, log unexpected mode changes and - unless an
 * ECC error must be recovered first - request Normal mode again.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1918 
/* System Error interrupt: classify the error as a TX MAB underflow or
 * a RX MAB overflow based on the other pending interrupt flags,
 * update the statistics accordingly, and complain about anything that
 * doesn't match a known pattern.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		/* Chips with the MAB_NO_WARN quirk log at debug level only. */
		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1996 
/* Recover from an ECC error in the TX-RAM: verify that the error is
 * located in the TX object the chip is about to (re-)send, reload
 * that TX object into the controller's RAM and trigger a retransmit
 * by requesting Normal mode.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* Distance of the erroneous TX object from the chip's TX tail,
	 * modulo the ring size.
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2045 
/* ECC error interrupt: read and clear ECCSTAT, determine whether the
 * corrupted RAM word is in the TX-RAM and, if a TX-RAM error keeps
 * re-occurring, start the recovery (reload of the TX object).
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Write 0 to the active interrupt flags to clear them. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		/* Start recovery once the same error has been seen
		 * often enough.
		 */
		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Deferred from mcp251xfd_handle_modif(): request Normal mode
	 * now that the ECC error has been handled.
	 */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2121 
2122 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2123 {
2124 	int err;
2125 	u32 crc;
2126 
2127 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2128 	if (err)
2129 		return err;
2130 
2131 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2132 				 MCP251XFD_REG_CRC_IF_MASK,
2133 				 ~crc);
2134 	if (err)
2135 		return err;
2136 
2137 	if (crc & MCP251XFD_REG_CRC_FERRIF)
2138 		netdev_notice(priv->ndev, "CRC write command format error.\n");
2139 	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2140 		netdev_notice(priv->ndev,
2141 			      "CRC write error detected. CRC=0x%04lx.\n",
2142 			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2143 
2144 	return 0;
2145 }
2146 
/* Invoke the IRQ sub-handler mcp251xfd_handle_<irq>() with the given
 * extra arguments and log (naming the IRQ source) if it failed.
 * Statement expression: evaluates to the handler's return value.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2159 
/* Threaded IRQ handler for the chip's interrupt line.
 *
 * If the optional RX_INT pin is wired up, RX frames are drained first
 * based on the pin level alone, saving a read of the interrupt status
 * registers over SPI for the common RX-only case. Afterwards the
 * handler loops: bulk-read the status registers, ACK the clearable
 * flags, dispatch each pending source to its sub-handler, until no
 * enabled interrupt flag is pending anymore.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	/* Fast path: drain RX while the RX_INT pin signals pending
	 * frames.
	 */
	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Read INT and the following status registers in one
		 * SPI transfer into priv->regs_status.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only act on flags that are both pending (IF) and
		 * enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		/* Mode change: the handler may request switching back
		 * to Normal mode; that is deferred via set_normal_mode
		 * until after the eccif handler ran (see below).
		 */
		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		/* Received frame(s) */
		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		/* TX event FIFO: completed transmissions */
		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		/* RX FIFO overflow */
		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		/* TX attempt exhausted */
		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		/* Invalid message (bus error) */
		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		/* System error */
		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		/* ECC error; gets the deferred set_normal_mode from
		 * the modif handler above.
		 */
		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		/* SPI CRC error */
		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	/* A sub-handler or register access failed: dump state and
	 * disable the chip's interrupts to avoid an IRQ storm.
	 */
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_dump(priv);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2306 
2307 static inline struct
2308 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2309 {
2310 	u8 tx_head;
2311 
2312 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2313 
2314 	return &tx_ring->obj[tx_head];
2315 }
2316 
/* Convert an outgoing skb into the hardware TX object layout inside
 * tx_obj->buf and set the length of the first SPI transfer to the
 * number of bytes that must actually be sent. Handles Classical CAN
 * and CAN-FD frames, standard and extended IDs, RTR frames, optional
 * padding of the data (sanitization) and the optional CRC protected
 * load command (MCP251XFD_QUIRK_CRC_TX).
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	/* Split the CAN ID into the chip's SID/EID fields. */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	/* RTR frames carry no data; everything else may be padded up
	 * to the next valid CAN-FD length.
	 */
	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		/* Classical CAN: honor raw DLC if CC_LEN8_DLC is set */
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The hardware TX object sits at a different offset depending
	 * on whether the CRC-protected load command is used.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	/* Chip expects little-endian; buffer may be unaligned. */
	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2420 
/* Kick off the pre-assembled SPI message of this TX object.
 * Asynchronous (spi_async() does not sleep), so it is safe to call
 * from the xmit path; completion is handled via the message's
 * callback (set up elsewhere in this driver).
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}
2426 
/* Check whether the TX ring is full, stopping the TX queue if so.
 *
 * Returns true if the caller should report NETDEV_TX_BUSY, false if
 * there is room for another TX object.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	/* A TX completion freed an object between the first check and
	 * the barrier: restart the queue and carry on transmitting.
	 */
	netif_start_queue(priv->ndev);

	return false;
}
2451 
/* ndo_start_xmit() callback: queue one CAN frame for transmission.
 *
 * Builds the hardware TX object from the skb, advances the ring head,
 * stores the echo skb for TX-complete accounting and kicks off the
 * asynchronous SPI transfer. Returns NETDEV_TX_BUSY while the TX FIFO
 * is full.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);	/* echo index = pre-increment head */
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	/* Store the echo skb before starting the transfer, so the
	 * TX-complete path can find it.
	 */
	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2493 
/* ndo_open() callback: power up the device via runtime PM, allocate
 * the rings, enable the transceiver, start the chip, request the
 * threaded IRQ and enable the chip's interrupts. On failure, the
 * labels unwind the steps in reverse order.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		/* get_sync() bumps the usage count even on failure */
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	mcp251xfd_timestamp_init(priv);
	can_rx_offload_enable(&priv->offload);

	/* Threaded handler only; IRQF_ONESHOT keeps the line masked
	 * until the (sleeping, SPI-accessing) thread has run.
	 */
	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2556 
/* ndo_stop() callback: tear down in the reverse order of
 * mcp251xfd_open() and drop the runtime-PM reference taken there.
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2575 
/* Network device callbacks; can_change_mtu() restricts MTU changes to
 * the valid CAN/CAN-FD frame sizes.
 */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2582 
2583 static void
2584 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2585 {
2586 	const struct spi_device *spi = priv->spi;
2587 	const struct spi_controller *ctlr = spi->controller;
2588 
2589 	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2590 		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2591 }
2592 
2593 static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
2594 {
2595 	const struct net_device *ndev = priv->ndev;
2596 	const struct mcp251xfd_devtype_data *devtype_data;
2597 	u32 osc;
2598 	int err;
2599 
2600 	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
2601 	 * autodetect the model.
2602 	 */
2603 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
2604 				 MCP251XFD_REG_OSC_LPMEN,
2605 				 MCP251XFD_REG_OSC_LPMEN);
2606 	if (err)
2607 		return err;
2608 
2609 	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
2610 	if (err)
2611 		return err;
2612 
2613 	if (osc & MCP251XFD_REG_OSC_LPMEN)
2614 		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
2615 	else
2616 		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
2617 
2618 	if (!mcp251xfd_is_251X(priv) &&
2619 	    priv->devtype_data.model != devtype_data->model) {
2620 		netdev_info(ndev,
2621 			    "Detected %s, but firmware specifies a %s. Fixing up.",
2622 			    __mcp251xfd_get_model_str(devtype_data->model),
2623 			    mcp251xfd_get_model_str(priv));
2624 	}
2625 	priv->devtype_data = *devtype_data;
2626 
2627 	/* We need to preserve the Half Duplex Quirk. */
2628 	mcp251xfd_register_quirks(priv);
2629 
2630 	/* Re-init regmap with quirks of detected model. */
2631 	return mcp251xfd_regmap_init(priv);
2632 }
2633 
2634 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
2635 {
2636 	int err, rx_pending;
2637 
2638 	if (!priv->rx_int)
2639 		return 0;
2640 
2641 	err = mcp251xfd_chip_rx_int_enable(priv);
2642 	if (err)
2643 		return err;
2644 
2645 	/* Check if RX_INT is properly working. The RX_INT should not
2646 	 * be active after a softreset.
2647 	 */
2648 	rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2649 
2650 	err = mcp251xfd_chip_rx_int_disable(priv);
2651 	if (err)
2652 		return err;
2653 
2654 	if (!rx_pending)
2655 		return 0;
2656 
2657 	netdev_info(priv->ndev,
2658 		    "RX_INT active after softreset, disabling RX_INT support.");
2659 	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
2660 	priv->rx_int = NULL;
2661 
2662 	return 0;
2663 }
2664 
2665 static int
2666 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2667 			      u32 *dev_id, u32 *effective_speed_hz)
2668 {
2669 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2670 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2671 	struct spi_transfer xfer[2] = { };
2672 	int err;
2673 
2674 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2675 	if (!buf_rx)
2676 		return -ENOMEM;
2677 
2678 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2679 	if (!buf_tx) {
2680 		err = -ENOMEM;
2681 		goto out_kfree_buf_rx;
2682 	}
2683 
2684 	xfer[0].tx_buf = buf_tx;
2685 	xfer[0].len = sizeof(buf_tx->cmd);
2686 	xfer[1].rx_buf = buf_rx->data;
2687 	xfer[1].len = sizeof(dev_id);
2688 
2689 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2690 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2691 	if (err)
2692 		goto out_kfree_buf_tx;
2693 
2694 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2695 	*effective_speed_hz = xfer->effective_speed_hz;
2696 
2697  out_kfree_buf_tx:
2698 	kfree(buf_tx);
2699  out_kfree_buf_rx:
2700 	kfree(buf_rx);
2701 
2702 	return 0;
2703 }
2704 
/* Evaluate to '+' if the given MCP251XFD_QUIRK_* flag is set on this
 * device, '-' otherwise. Relies on a local variable named "priv";
 * used to build the mcp251xfd_register_done() summary line.
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2707 
/* Read back the DEVID register and log a one-line summary: model,
 * silicon revision, active quirks and the relevant clock rates
 * (CAN clock, original/limited/effective SPI speed).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	/* "x / 1000000" and "x % 1000000 / 1000 / 10" format a Hz
	 * value as MHz with two decimal places.
	 */
	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2742 
/* Bring the controller up far enough to softreset it, autodetect the
 * model, check RX_INT and register the CAN network device. On success
 * the chip is put into Sleep mode and the runtime-PM reference is
 * dropped, so clocks and vdd can be gated until ndo_open().
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	/* Mark the device active for runtime PM while holding a
	 * usage reference, then enable runtime PM.
	 */
	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	/* -ENODEV means no chip was found at all: skip trying to put
	 * it into Sleep mode on the error path.
	 */
	err = mcp251xfd_chip_softreset(priv);
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2806 
/* Undo mcp251xfd_register(): unregister the candev and balance the
 * runtime-PM state and the clock/vdd enable done at registration.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	/* Resume the device (so clocks/vdd are in a known-on state)
	 * before disabling them and runtime PM for good.
	 */
	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2818 
/* DT match table. The generic "microchip,mcp251xfd" entry selects the
 * autodetect devtype data (see mcp251xfd_devtype_data_mcp251xfd).
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2834 
/* SPI device id table, used when no DT match data is available; the
 * "mcp251xfd" entry selects the autodetect devtype data.
 */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2850 
/* SPI probe callback: gather resources (IRQ, optional RX_INT GPIO,
 * optional vdd/xceiver regulators, oscillator clock), validate the
 * clock rate, allocate and populate the candev, limit the SPI speed
 * per errata, set up regmap and rx-offload, and register the device.
 */
static int mcp251xfd_probe(struct spi_device *spi)
{
	const void *match;
	struct net_device *ndev;
	struct mcp251xfd_priv *priv;
	struct gpio_desc *rx_int;
	struct regulator *reg_vdd, *reg_xceiver;
	struct clk *clk;
	u32 freq;
	int err;

	if (!spi->irq)
		return dev_err_probe(&spi->dev, -ENXIO,
				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");

	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
					 GPIOD_IN);
	if (IS_ERR(rx_int))
		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
				     "Failed to get RX-INT!\n");

	/* -ENODEV from the optional regulators means "not specified",
	 * which is fine; any other error is fatal.
	 */
	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
	if (PTR_ERR(reg_vdd) == -ENODEV)
		reg_vdd = NULL;
	else if (IS_ERR(reg_vdd))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
				     "Failed to get VDD regulator!\n");

	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
	if (PTR_ERR(reg_xceiver) == -ENODEV)
		reg_xceiver = NULL;
	else if (IS_ERR(reg_xceiver))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
				     "Failed to get Transceiver regulator!\n");

	clk = devm_clk_get(&spi->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&spi->dev, PTR_ERR(clk),
				     "Failed to get Oscillator (clock)!\n");
	freq = clk_get_rate(clk);

	/* Sanity check */
	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low or high.\n",
			freq);
		return -ERANGE;
	}

	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
			freq);
		return -ERANGE;
	}

	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
			    MCP251XFD_TX_OBJ_NUM_MAX);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &spi->dev);

	ndev->netdev_ops = &mcp251xfd_netdev_ops;
	ndev->irq = spi->irq;
	ndev->flags |= IFF_ECHO;

	priv = netdev_priv(ndev);
	spi_set_drvdata(spi, priv);
	priv->can.clock.freq = freq;
	priv->can.do_set_mode = mcp251xfd_set_mode;
	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
		CAN_CTRLMODE_CC_LEN8_DLC;
	priv->ndev = ndev;
	priv->spi = spi;
	priv->rx_int = rx_int;
	priv->clk = clk;
	priv->reg_vdd = reg_vdd;
	priv->reg_xceiver = reg_xceiver;

	match = device_get_match_data(&spi->dev);
	if (match)
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
	else
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
			spi_get_device_id(spi)->driver_data;

	/* Errata Reference:
	 * mcp2517fd: DS80000789C 5., mcp2518fd: DS80000792C 4.
	 *
	 * The SPI can write corrupted data to the RAM at fast SPI
	 * speeds:
	 *
	 * Simultaneous activity on the CAN bus while writing data to
	 * RAM via the SPI interface, with high SCK frequency, can
	 * lead to corrupted data being written to RAM.
	 *
	 * Fix/Work Around:
	 * Ensure that FSCK is less than or equal to 0.85 *
	 * (FSYSCLK/2).
	 *
	 * Known good combinations are:
	 *
	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	config
	 *
	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
	 * 2518	40 MHz	fsl,imx6dl		fsl,imx51-ecspi		15000000 Hz	 75.00%	 30000000 Hz	default
	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
	 *
	 */
	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
	/* "/ 1000 * 850" caps FSCK at 0.85 * FSYSCLK/2 per the errata
	 * work around above, without intermediate overflow.
	 */
	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
	spi->bits_per_word = 8;
	spi->rt = true;
	err = spi_setup(spi);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_regmap_init(priv);
	if (err)
		goto out_free_candev;

	err = can_rx_offload_add_manual(ndev, &priv->offload,
					MCP251XFD_NAPI_WEIGHT);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_register(priv);
	if (err)
		goto out_can_rx_offload_del;

	return 0;

 out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
 out_free_candev:
	/* Restore the original SPI speed before giving up the device. */
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;

	free_candev(ndev);

	return err;
}
3002 
/* SPI remove callback: tear down in reverse order of probe and
 * restore the SPI device's original maximum speed.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
3015 
/* Runtime-PM suspend: gate the clocks and supplies. */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}
3022 
/* Runtime-PM resume: re-enable the clocks and supplies. */
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}
3029 
/* Only runtime PM is implemented here; no system sleep callbacks. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
3034 
/* SPI driver glue; registered/unregistered by module_spi_driver(). */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);
3046 
3047 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
3048 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
3049 MODULE_LICENSE("GPL v2");
3050