1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020 Pengutronix,
6 //                          Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/pm_runtime.h>
23 
24 #include <asm/unaligned.h>
25 
26 #include "mcp251xfd.h"
27 
28 #define DEVICE_NAME "mcp251xfd"
29 
/* MCP2517FD: needs the MAB_NO_WARN quirk in addition to the CRC and
 * ECC quirks shared with the other models.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
36 
/* MCP2518FD: CRC protected register/RX/TX accesses and RAM ECC. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
42 
43 /* Autodetect model, start with CRC enabled. */
/* Autodetect model, start with CRC enabled. Uses the same quirks as
 * the MCP2518FD until the concrete model is known.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
49 
/* Bit timing limits for the nominal (arbitration) phase, programmed
 * into the NBTCFG register in mcp251xfd_set_bittiming().
 */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
61 
/* Bit timing limits for the CAN-FD data phase, programmed into the
 * DBTCFG register in mcp251xfd_set_bittiming().
 */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
73 
74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
75 {
76 	switch (model) {
77 	case MCP251XFD_MODEL_MCP2517FD:
78 		return "MCP2517FD";
79 	case MCP251XFD_MODEL_MCP2518FD:
80 		return "MCP2518FD";
81 	case MCP251XFD_MODEL_MCP251XFD:
82 		return "MCP251xFD";
83 	}
84 
85 	return "<unknown>";
86 }
87 
88 static inline const char *
89 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
90 {
91 	return __mcp251xfd_get_model_str(priv->devtype_data.model);
92 }
93 
94 static const char *mcp251xfd_get_mode_str(const u8 mode)
95 {
96 	switch (mode) {
97 	case MCP251XFD_REG_CON_MODE_MIXED:
98 		return "Mixed (CAN FD/CAN 2.0)";
99 	case MCP251XFD_REG_CON_MODE_SLEEP:
100 		return "Sleep";
101 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
102 		return "Internal Loopback";
103 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
104 		return "Listen Only";
105 	case MCP251XFD_REG_CON_MODE_CONFIG:
106 		return "Configuration";
107 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
108 		return "External Loopback";
109 	case MCP251XFD_REG_CON_MODE_CAN2_0:
110 		return "CAN 2.0";
111 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
112 		return "Restricted Operation";
113 	}
114 
115 	return "<unknown>";
116 }
117 
118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
119 {
120 	if (!priv->reg_vdd)
121 		return 0;
122 
123 	return regulator_enable(priv->reg_vdd);
124 }
125 
126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
127 {
128 	if (!priv->reg_vdd)
129 		return 0;
130 
131 	return regulator_disable(priv->reg_vdd);
132 }
133 
134 static inline int
135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 {
137 	if (!priv->reg_xceiver)
138 		return 0;
139 
140 	return regulator_enable(priv->reg_xceiver);
141 }
142 
143 static inline int
144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 {
146 	if (!priv->reg_xceiver)
147 		return 0;
148 
149 	return regulator_disable(priv->reg_xceiver);
150 }
151 
152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
153 {
154 	int err;
155 
156 	err = clk_prepare_enable(priv->clk);
157 	if (err)
158 		return err;
159 
160 	err = mcp251xfd_vdd_enable(priv);
161 	if (err)
162 		clk_disable_unprepare(priv->clk);
163 
164 	/* Wait for oscillator stabilisation time after power up */
165 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
166 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
167 
168 	return err;
169 }
170 
171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
172 {
173 	int err;
174 
175 	err = mcp251xfd_vdd_disable(priv);
176 	if (err)
177 		return err;
178 
179 	clk_disable_unprepare(priv->clk);
180 
181 	return 0;
182 }
183 
/* Prepare a SPI "write register" command in @write_reg_buf.
 *
 * Only the bytes of @val selected by @mask are transferred: the write
 * starts at the first set byte of @mask and ends at the last set byte.
 * If the chip uses CRC protected register accesses
 * (MCP251XFD_QUIRK_CRC_REG), the CRC over command and data is appended
 * to the buffer.
 *
 * Returns the total number of bytes to send over SPI.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	/* Shift out the bytes below the first set byte of the mask, so
	 * the payload starts with the first byte actually written.
	 */
	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC is calculated over command and data bytes. */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
219 
220 static inline int
221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
222 				 u8 *tef_tail)
223 {
224 	u32 tef_ua;
225 	int err;
226 
227 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
228 	if (err)
229 		return err;
230 
231 	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
232 
233 	return 0;
234 }
235 
236 static inline int
237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
238 				u8 *tx_tail)
239 {
240 	u32 fifo_sta;
241 	int err;
242 
243 	err = regmap_read(priv->map_reg,
244 			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
245 			  &fifo_sta);
246 	if (err)
247 		return err;
248 
249 	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
250 
251 	return 0;
252 }
253 
254 static inline int
255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
256 				const struct mcp251xfd_rx_ring *ring,
257 				u8 *rx_head)
258 {
259 	u32 fifo_sta;
260 	int err;
261 
262 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
263 			  &fifo_sta);
264 	if (err)
265 		return err;
266 
267 	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
268 
269 	return 0;
270 }
271 
272 static inline int
273 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
274 				const struct mcp251xfd_rx_ring *ring,
275 				u8 *rx_tail)
276 {
277 	u32 fifo_ua;
278 	int err;
279 
280 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
281 			  &fifo_ua);
282 	if (err)
283 		return err;
284 
285 	fifo_ua -= ring->base - MCP251XFD_RAM_START;
286 	*rx_tail = fifo_ua / ring->obj_size;
287 
288 	return 0;
289 }
290 
/* Set up TX object @n of @ring: prepare the SPI command header for the
 * FIFO load, wire up the two chained SPI transfers (object data
 * followed by the shared "request to send" write) and initialize the
 * SPI message that combines them.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
326 
/* Initialize the TEF, TX and RX rings: reset head/tail pointers,
 * compute the rings' base addresses in controller RAM (TEF objects
 * first, then TX objects, then the RX rings back to back) and prepare
 * the "UINC" SPI transfers used to increment the chip's FIFO pointers.
 *
 * Cannot fail; all buffers were allocated in mcp251xfd_ring_alloc().
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	/* All UINC transfers share the same prepared buffer. */
	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		struct spi_transfer *xfer;

		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects are placed directly after the TEF objects in RAM. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* The first RX ring starts after the TX objects, each
		 * following ring directly after its predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			struct spi_transfer *xfer;

			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}
	}
}
411 
412 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
413 {
414 	int i;
415 
416 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
417 		kfree(priv->rx[i]);
418 		priv->rx[i] = NULL;
419 	}
420 }
421 
/* Allocate the RX rings and size all rings to fit into the chip's RAM.
 *
 * The TEF and TX rings live in preallocated memory in priv; only the
 * RX rings are kmalloc'ed here. The number of RX objects per ring is
 * the largest power of two that fits into the remaining RAM, capped at
 * MCP251XFD_RX_OBJ_NUM_MAX.
 *
 * Returns 0 on success or -ENOMEM (all previously allocated RX rings
 * are freed again in that case).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* Each TX object also gets a TEF object in RAM. */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Round down to a power of two, clamp to the maximum. */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
490 
491 static inline int
492 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
493 {
494 	u32 val;
495 	int err;
496 
497 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
498 	if (err)
499 		return err;
500 
501 	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
502 
503 	return 0;
504 }
505 
/* Request operation mode @mode_req via the CON register.
 *
 * Unless @nowait is set, poll until the chip reports the requested
 * mode in OPMOD. Requests for Sleep Mode are never polled
 * (presumably the chip stops answering once asleep — NOTE(review):
 * confirm against the datasheet).
 *
 * Returns 0 on success, a regmap error, or the poll's -ETIMEDOUT if
 * the mode was not reached in time.
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		/* con holds the last value read by the poll loop. */
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
539 
540 static inline int
541 mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
542 			const u8 mode_req)
543 {
544 	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
545 }
546 
547 static inline int
548 mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
549 			       const u8 mode_req)
550 {
551 	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
552 }
553 
554 static inline bool mcp251xfd_osc_invalid(u32 reg)
555 {
556 	return reg == 0x0 || reg == 0xffffffff;
557 }
558 
/* Wake up the chip's oscillator and wait until it reports ready.
 *
 * Returns 0 on success, -ENODEV if the SPI bus returns an
 * implausible OSC value (no chip responding), -ETIMEDOUT if the
 * oscillator does not become ready in time, or another regmap error.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	/* Check for an invalid reading before the poll's error code:
	 * an all-zeros/all-ones OSC value means no chip is answering,
	 * which is reported as -ENODEV rather than -ETIMEDOUT.
	 */
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	}

	return err;
}
601 
602 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
603 {
604 	const __be16 cmd = mcp251xfd_cmd_reset();
605 	int err;
606 
607 	/* The Set Mode and SPI Reset command only seems to works if
608 	 * the controller is not in Sleep Mode.
609 	 */
610 	err = mcp251xfd_chip_clock_enable(priv);
611 	if (err)
612 		return err;
613 
614 	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
615 	if (err)
616 		return err;
617 
618 	/* spi_write_then_read() works with non DMA-safe buffers */
619 	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
620 }
621 
622 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
623 {
624 	u32 osc, osc_reference;
625 	u8 mode;
626 	int err;
627 
628 	err = mcp251xfd_chip_get_mode(priv, &mode);
629 	if (err)
630 		return err;
631 
632 	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
633 		netdev_info(priv->ndev,
634 			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
635 			    mcp251xfd_get_mode_str(mode), mode);
636 		return -ETIMEDOUT;
637 	}
638 
639 	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
640 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
641 			   MCP251XFD_REG_OSC_CLKODIV_10);
642 
643 	/* check reset defaults of OSC reg */
644 	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
645 	if (err)
646 		return err;
647 
648 	if (osc != osc_reference) {
649 		netdev_info(priv->ndev,
650 			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
651 			    osc, osc_reference);
652 		return -ETIMEDOUT;
653 	}
654 
655 	return 0;
656 }
657 
658 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
659 {
660 	int err, i;
661 
662 	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
663 		if (i)
664 			netdev_info(priv->ndev,
665 				    "Retrying to reset controller.\n");
666 
667 		err = mcp251xfd_chip_softreset_do(priv);
668 		if (err == -ETIMEDOUT)
669 			continue;
670 		if (err)
671 			return err;
672 
673 		err = mcp251xfd_chip_softreset_check(priv);
674 		if (err == -ETIMEDOUT)
675 			continue;
676 		if (err)
677 			return err;
678 
679 		return 0;
680 	}
681 
682 	return err;
683 }
684 
685 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
686 {
687 	u32 osc;
688 	int err;
689 
690 	/* Activate Low Power Mode on Oscillator Disable. This only
691 	 * works on the MCP2518FD. The MCP2517FD will go into normal
692 	 * Sleep Mode instead.
693 	 */
694 	osc = MCP251XFD_REG_OSC_LPMEN |
695 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
696 			   MCP251XFD_REG_OSC_CLKODIV_10);
697 	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
698 	if (err)
699 		return err;
700 
701 	/* Set Time Base Counter Prescaler to 1.
702 	 *
703 	 * This means an overflow of the 32 bit Time Base Counter
704 	 * register at 40 MHz every 107 seconds.
705 	 */
706 	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
707 			    MCP251XFD_REG_TSCON_TBCEN);
708 }
709 
/* Program the CAN controller configuration and the bit timing
 * registers (CON, NBTCFG, and — in FD mode — DBTCFG and TDC) from the
 * bit timing parameters in priv->can.
 *
 * Returns 0 on success or a regmap error.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only disabled for non-ISO FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	/* Data phase registers are only programmed in CAN-FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation
	 *
	 * Offset clamped to the signed range of the TDCO field;
	 * NOTE(review): assumes the offset is measured in minimum time
	 * quanta — confirm against the TDC register description.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
787 
788 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
789 {
790 	u32 val;
791 
792 	if (!priv->rx_int)
793 		return 0;
794 
795 	/* Configure GPIOs:
796 	 * - PIN0: GPIO Input
797 	 * - PIN1: GPIO Input/RX Interrupt
798 	 *
799 	 * PIN1 must be Input, otherwise there is a glitch on the
800 	 * rx-INT line. It happens between setting the PIN as output
801 	 * (in the first byte of the SPI transfer) and configuring the
802 	 * PIN as interrupt (in the last byte of the SPI transfer).
803 	 */
804 	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
805 		MCP251XFD_REG_IOCON_TRIS0;
806 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
807 }
808 
809 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
810 {
811 	u32 val;
812 
813 	if (!priv->rx_int)
814 		return 0;
815 
816 	/* Configure GPIOs:
817 	 * - PIN0: GPIO Input
818 	 * - PIN1: GPIO Input
819 	 */
820 	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
821 		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
822 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
823 }
824 
825 static int
826 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
827 				const struct mcp251xfd_rx_ring *ring)
828 {
829 	u32 fifo_con;
830 
831 	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
832 	 *
833 	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
834 	 * generate a RXOVIF, use this to properly detect RX MAB
835 	 * overflows.
836 	 */
837 	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
838 			      ring->obj_num - 1) |
839 		MCP251XFD_REG_FIFOCON_RXTSEN |
840 		MCP251XFD_REG_FIFOCON_RXOVIE |
841 		MCP251XFD_REG_FIFOCON_TFNRFNIE;
842 
843 	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
844 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
845 				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
846 	else
847 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
848 				       MCP251XFD_REG_FIFOCON_PLSIZE_8);
849 
850 	return regmap_write(priv->map_reg,
851 			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
852 }
853 
854 static int
855 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
856 				  const struct mcp251xfd_rx_ring *ring)
857 {
858 	u32 fltcon;
859 
860 	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
861 		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
862 
863 	return regmap_update_bits(priv->map_reg,
864 				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
865 				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
866 				  fltcon);
867 }
868 
/* Program the chip's TEF, TX FIFO and all RX FIFOs (including the RX
 * filters) according to the ring configuration set up in
 * mcp251xfd_ring_alloc()/mcp251xfd_ring_init().
 *
 * Returns 0 on success or the first regmap error.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF - sized to hold one event per TX object */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* FD and listen-only mode use the full 64 byte payload. */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* One-shot mode limits the retransmission attempts. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
926 
927 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
928 {
929 	struct mcp251xfd_ecc *ecc = &priv->ecc;
930 	void *ram;
931 	u32 val = 0;
932 	int err;
933 
934 	ecc->ecc_stat = 0;
935 
936 	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
937 		val = MCP251XFD_REG_ECCCON_ECCEN;
938 
939 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
940 				 MCP251XFD_REG_ECCCON_ECCEN, val);
941 	if (err)
942 		return err;
943 
944 	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
945 	if (!ram)
946 		return -ENOMEM;
947 
948 	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
949 			       MCP251XFD_RAM_SIZE);
950 	kfree(ram);
951 
952 	return err;
953 }
954 
955 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
956 {
957 	struct mcp251xfd_ecc *ecc = &priv->ecc;
958 
959 	ecc->ecc_stat = 0;
960 }
961 
962 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
963 {
964 	u8 mode;
965 
966 
967 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
968 		mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
969 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
970 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
971 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
972 		mode = MCP251XFD_REG_CON_MODE_MIXED;
973 	else
974 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
975 
976 	return mode;
977 }
978 
979 static int
980 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
981 				 bool nowait)
982 {
983 	u8 mode;
984 
985 	mode = mcp251xfd_get_normal_mode(priv);
986 
987 	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
988 }
989 
990 static inline int
991 mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
992 {
993 	return __mcp251xfd_chip_set_normal_mode(priv, false);
994 }
995 
996 static inline int
997 mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
998 {
999 	return __mcp251xfd_chip_set_normal_mode(priv, true);
1000 }
1001 
1002 static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
1003 {
1004 	u32 val;
1005 	int err;
1006 
1007 	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
1008 	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
1009 	if (err)
1010 		return err;
1011 
1012 	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1013 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
1014 	if (err)
1015 		return err;
1016 
1017 	val = MCP251XFD_REG_INT_CERRIE |
1018 		MCP251XFD_REG_INT_SERRIE |
1019 		MCP251XFD_REG_INT_RXOVIE |
1020 		MCP251XFD_REG_INT_TXATIE |
1021 		MCP251XFD_REG_INT_SPICRCIE |
1022 		MCP251XFD_REG_INT_ECCIE |
1023 		MCP251XFD_REG_INT_TEFIE |
1024 		MCP251XFD_REG_INT_MODIE |
1025 		MCP251XFD_REG_INT_RXIE;
1026 
1027 	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
1028 		val |= MCP251XFD_REG_INT_IVMIE;
1029 
1030 	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
1031 }
1032 
1033 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
1034 {
1035 	int err;
1036 	u32 mask;
1037 
1038 	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1039 	if (err)
1040 		return err;
1041 
1042 	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1043 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1044 				 mask, 0x0);
1045 	if (err)
1046 		return err;
1047 
1048 	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
1049 }
1050 
/* Stop the chip: record @state, disable all interrupt sources and put
 * the chip into Sleep Mode.
 *
 * The return values of the interrupt/RX-INT disable helpers are
 * deliberately ignored — this is a best-effort shutdown path; only
 * the final mode change's result is returned.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1060 
1061 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1062 {
1063 	int err;
1064 
1065 	err = mcp251xfd_chip_softreset(priv);
1066 	if (err)
1067 		goto out_chip_stop;
1068 
1069 	err = mcp251xfd_chip_clock_init(priv);
1070 	if (err)
1071 		goto out_chip_stop;
1072 
1073 	err = mcp251xfd_set_bittiming(priv);
1074 	if (err)
1075 		goto out_chip_stop;
1076 
1077 	err = mcp251xfd_chip_rx_int_enable(priv);
1078 	if (err)
1079 		return err;
1080 
1081 	err = mcp251xfd_chip_ecc_init(priv);
1082 	if (err)
1083 		goto out_chip_stop;
1084 
1085 	mcp251xfd_ring_init(priv);
1086 
1087 	err = mcp251xfd_chip_fifo_init(priv);
1088 	if (err)
1089 		goto out_chip_stop;
1090 
1091 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1092 
1093 	err = mcp251xfd_chip_set_normal_mode(priv);
1094 	if (err)
1095 		goto out_chip_stop;
1096 
1097 	return 0;
1098 
1099  out_chip_stop:
1100 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1101 
1102 	return err;
1103 }
1104 
1105 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1106 {
1107 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
1108 	int err;
1109 
1110 	switch (mode) {
1111 	case CAN_MODE_START:
1112 		err = mcp251xfd_chip_start(priv);
1113 		if (err)
1114 			return err;
1115 
1116 		err = mcp251xfd_chip_interrupts_enable(priv);
1117 		if (err) {
1118 			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1119 			return err;
1120 		}
1121 
1122 		netif_wake_queue(ndev);
1123 		break;
1124 
1125 	default:
1126 		return -EOPNOTSUPP;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1133 					struct can_berr_counter *bec)
1134 {
1135 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1136 	u32 trec;
1137 	int err;
1138 
1139 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1140 	if (err)
1141 		return err;
1142 
1143 	if (trec & MCP251XFD_REG_TREC_TXBO)
1144 		bec->txerr = 256;
1145 	else
1146 		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1147 	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1148 
1149 	return 0;
1150 }
1151 
1152 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1153 				      struct can_berr_counter *bec)
1154 {
1155 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1156 
1157 	/* Avoid waking up the controller if the interface is down */
1158 	if (!(ndev->flags & IFF_UP))
1159 		return 0;
1160 
1161 	/* The controller is powered down during Bus Off, use saved
1162 	 * bec values.
1163 	 */
1164 	if (priv->can.state == CAN_STATE_BUS_OFF) {
1165 		*bec = priv->bec;
1166 		return 0;
1167 	}
1168 
1169 	return __mcp251xfd_get_berr_counter(ndev, bec);
1170 }
1171 
1172 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1173 {
1174 	u8 tef_tail_chip, tef_tail;
1175 	int err;
1176 
1177 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1178 		return 0;
1179 
1180 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1181 	if (err)
1182 		return err;
1183 
1184 	tef_tail = mcp251xfd_get_tef_tail(priv);
1185 	if (tef_tail_chip != tef_tail) {
1186 		netdev_err(priv->ndev,
1187 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1188 			   tef_tail_chip, tef_tail);
1189 		return -EILSEQ;
1190 	}
1191 
1192 	return 0;
1193 }
1194 
1195 static int
1196 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1197 			const struct mcp251xfd_rx_ring *ring)
1198 {
1199 	u8 rx_tail_chip, rx_tail;
1200 	int err;
1201 
1202 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1203 		return 0;
1204 
1205 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1206 	if (err)
1207 		return err;
1208 
1209 	rx_tail = mcp251xfd_get_rx_tail(ring);
1210 	if (rx_tail_chip != rx_tail) {
1211 		netdev_err(priv->ndev,
1212 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1213 			   rx_tail_chip, rx_tail);
1214 		return -EILSEQ;
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 static int
1221 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1222 {
1223 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1224 	u32 tef_sta;
1225 	int err;
1226 
1227 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1228 	if (err)
1229 		return err;
1230 
1231 	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1232 		netdev_err(priv->ndev,
1233 			   "Transmit Event FIFO buffer overflow.\n");
1234 		return -ENOBUFS;
1235 	}
1236 
1237 	netdev_info(priv->ndev,
1238 		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
1239 		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1240 		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1241 		    "not empty" : "empty",
1242 		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);
1243 
1244 	/* The Sequence Number in the TEF doesn't match our tef_tail. */
1245 	return -EAGAIN;
1246 }
1247 
/* Process a single TEF object: verify its sequence number against our
 * tef_tail, complete the corresponding echo skb and advance the TEF
 * tail. Returns -EAGAIN (via mcp251xfd_handle_tefif_recover()) if the
 * sequence number doesn't match.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 seq, seq_masked, tef_tail_masked;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	/* Hand the echo skb back to the stack and account the TX. */
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    mcp251xfd_get_tef_tail(priv),
					    hw_tef_obj->ts, NULL);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}
1278 
/* Update the TEF head from the chip's TX FIFO tail.
 *
 * tef->head is a free running counter: project the chip's (ring
 * relative) TX tail into the counter's current "lap", keeping the
 * head monotonically increasing and capped at the TX head.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1302 
1303 static inline int
1304 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1305 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1306 		       const u8 offset, const u8 len)
1307 {
1308 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1309 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1310 
1311 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1312 	    (offset > tx_ring->obj_num ||
1313 	     len > tx_ring->obj_num ||
1314 	     offset + len > tx_ring->obj_num)) {
1315 		netdev_err(priv->ndev,
1316 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1317 			   tx_ring->obj_num, offset, len);
1318 		return -ERANGE;
1319 	}
1320 
1321 	return regmap_bulk_read(priv->map_rx,
1322 				mcp251xfd_get_tef_obj_addr(offset),
1323 				hw_tef_obj,
1324 				sizeof(*hw_tef_obj) / val_bytes * len);
1325 }
1326 
/* Handle a Transmit Event FIFO (TEFIF) interrupt.
 *
 * Reads all new TEF objects (in up to two chunks if the read wraps
 * the ring), completes the corresponding echo skbs, increments the
 * chip's TEF FIFO tail pointer accordingly and wakes the TX queue if
 * there is free space again.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	/* len is the total number of new objects, l the number
	 * readable without wrapping the ring.
	 */
	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	if (l < len) {
		/* The read wraps: fetch the remainder from the start
		 * of the ring.
		 */
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave the loop and
		 * let the interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		struct spi_transfer *last_xfer;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporary set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;

		tx_ring->tail += len;

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1409 
/* Update the head of a RX ring from the chip.
 *
 * ring->head is a free running counter: project the chip's (ring
 * relative) RX head into the counter's current "lap", keeping the
 * head monotonically increasing.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1433 
/* Convert a RX object read from the chip into the CAN(-FD) frame
 * inside the given skb: CAN ID (standard or extended), flags (ESI,
 * BRS, RTR), data length and payload.
 */
static void
mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
			   struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 dlc;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
		u32 sid, eid;

		/* Extended frame: assemble the CAN ID from the SID
		 * and EID fields of the RX object.
		 */
		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);

		cfd->can_id = CAN_EFF_FLAG |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
	} else {
		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
					hw_rx_obj->id);
	}

	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);

	/* CANFD */
	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
			cfd->flags |= CANFD_ESI;

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
			cfd->flags |= CANFD_BRS;

		cfd->len = can_fd_dlc2len(dlc);
	} else {
		/* Classical CAN */
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
			cfd->can_id |= CAN_RTR_FLAG;

		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
				     priv->can.ctrlmode);
	}

	/* RTR frames carry no payload. */
	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
		memcpy(cfd->data, hw_rx_obj->data, cfd->len);
}
1479 
1480 static int
1481 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1482 			  struct mcp251xfd_rx_ring *ring,
1483 			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1484 {
1485 	struct net_device_stats *stats = &priv->ndev->stats;
1486 	struct sk_buff *skb;
1487 	struct canfd_frame *cfd;
1488 	int err;
1489 
1490 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1491 		skb = alloc_canfd_skb(priv->ndev, &cfd);
1492 	else
1493 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1494 
1495 	if (!skb) {
1496 		stats->rx_dropped++;
1497 		return 0;
1498 	}
1499 
1500 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1501 	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1502 	if (err)
1503 		stats->rx_fifo_errors++;
1504 
1505 	return 0;
1506 }
1507 
1508 static inline int
1509 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1510 		      const struct mcp251xfd_rx_ring *ring,
1511 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1512 		      const u8 offset, const u8 len)
1513 {
1514 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1515 	int err;
1516 
1517 	err = regmap_bulk_read(priv->map_rx,
1518 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1519 			       hw_rx_obj,
1520 			       len * ring->obj_size / val_bytes);
1521 
1522 	return err;
1523 }
1524 
/* Process all new RX objects of one RX ring.
 *
 * Reads the RX objects in linear (non-wrapping) chunks, hands them to
 * the stack and increments the chip's RX FIFO tail pointer once per
 * chunk in a single SPI message.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* len is the number of objects readable without wrapping the
	 * ring; the loop continues until the ring is drained.
	 */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		struct spi_transfer *last_xfer;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporary set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}
1580 
1581 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1582 {
1583 	struct mcp251xfd_rx_ring *ring;
1584 	int err, n;
1585 
1586 	mcp251xfd_for_each_rx_ring(priv, ring, n) {
1587 		err = mcp251xfd_handle_rxif_ring(priv, ring);
1588 		if (err)
1589 			return err;
1590 	}
1591 
1592 	return 0;
1593 }
1594 
/* Read the chip's time base counter (TBC register) into @timestamp. */
static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
					  u32 *timestamp)
{
	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
}
1600 
1601 static struct sk_buff *
1602 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
1603 			    struct can_frame **cf, u32 *timestamp)
1604 {
1605 	int err;
1606 
1607 	err = mcp251xfd_get_timestamp(priv, timestamp);
1608 	if (err)
1609 		return NULL;
1610 
1611 	return alloc_can_err_skb(priv->ndev, cf);
1612 }
1613 
/* Handle a RX overflow (RXOVIF) interrupt.
 *
 * Accounts the overflow, clears the RXOVIF bit of every affected RX
 * FIFO and queues an error frame with CAN_ERR_CRTL_RX_OVERFLOW
 * towards the stack.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Write "0" to clear the RXOVIF bit in the FIFO's
		 * status register.
		 */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1665 
/* TXATIF interrupt handler: currently a log-only stub, no further
 * handling is implemented.
 */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1672 
/* Handle a Bus Error (IVMIF) interrupt.
 *
 * Reads and clears the BDIAG1 diagnosis register, maps its error bits
 * onto the corresponding CAN error frame fields, updates the error
 * statistics and queues the error frame towards the stack. The skb
 * allocation may fail; statistics are updated regardless (cf may be
 * NULL).
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	/* Without an skb there is nothing to queue to the stack. */
	if (!cf)
		return 0;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1769 
/* Handle a CAN error state change (CERRIF) interrupt.
 *
 * Derives the new TX/RX error states from the TREC register and
 * informs the CAN stack via can_change_state(). On Bus Off the error
 * counters are saved (for mcp251xfd_get_berr_counter()) and the chip
 * is stopped.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	/* Map the TREC status bits to the TX CAN state. */
	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	/* ... and to the RX CAN state. */
	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		/* Add the current error counters to the error frame. */
		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1842 
/* Handle a Mode Change (MODIF) interrupt.
 *
 * If the controller left the requested (Normal) mode - e.g. after a
 * TX MAB underflow, see errata notes below - request Normal mode
 * again, unless a pending ECC error has to be handled first; in that
 * case *set_normal_mode is set and mcp251xfd_handle_eccif() triggers
 * the mode change after the recovery.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1897 
/* Handle a System Error (SERRIF) interrupt.
 *
 * Classifies the error as TX MAB underflow or RX MAB overflow based
 * on the other pending interrupt flags (see errata notes below) and
 * updates the statistics accordingly. Unclassifiable system errors
 * are logged.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1975 
/* Recover from an ECC error in the TX-RAM: re-upload the affected TX
 * object @nr into the controller's RAM and request Normal mode to
 * trigger the retransmission.
 *
 * Bails out with -EINVAL if the chip's and the driver's view of the
 * TX tail is inconsistent.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2024 
/* Handle an ECC error (ECCIF) interrupt.
 *
 * Reads and clears ECCSTAT. Per errata (see below) both single and
 * double errors are treated as corruption of the RAM word at ERRADDR.
 * If the same error hits the TX-RAM MCP251XFD_ECC_CNT_MAX times, the
 * TX object is recovered via mcp251xfd_handle_eccif_recover().
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Write "0" to clear the active interrupt flags. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Deferred from mcp251xfd_handle_modif(): request Normal mode
	 * now that the ECC error has been handled.
	 */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2100 
2101 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2102 {
2103 	int err;
2104 	u32 crc;
2105 
2106 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2107 	if (err)
2108 		return err;
2109 
2110 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2111 				 MCP251XFD_REG_CRC_IF_MASK,
2112 				 ~crc);
2113 	if (err)
2114 		return err;
2115 
2116 	if (crc & MCP251XFD_REG_CRC_FERRIF)
2117 		netdev_notice(priv->ndev, "CRC write command format error.\n");
2118 	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2119 		netdev_notice(priv->ndev,
2120 			      "CRC write error detected. CRC=0x%04lx.\n",
2121 			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2122 
2123 	return 0;
2124 }
2125 
/* Invoke the IRQ sub-handler mcp251xfd_handle_<irq>() and log an
 * error message, including the handler's name, if it fails.
 * Evaluates to the handler's return value (GNU statement expression).
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2138 
/* Threaded IRQ handler.
 *
 * If the optional RX_INT line is available, first drain the RX FIFO
 * for as long as that GPIO signals pending frames - this avoids SPI
 * round trips to the interrupt status register.
 *
 * Then loop: bulk-read the interrupt status registers, ACK the
 * clearable interrupt flags, and dispatch every source that is both
 * pending and enabled to its mcp251xfd_handle_*() sub-handler, until
 * no source is pending. On any handler error the controller's
 * interrupts are disabled and the error is logged.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Read the interrupt status registers in one go into
		 * priv->regs_status, starting at MCP251XFD_REG_INT.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only consider sources that are both flagged (IF) and
		 * enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		/* Mode change (MODIF) may request switching back to
		 * normal mode; the flag is forwarded to the ECC
		 * handler below.
		 */
		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	/* The controller is in an unknown state; mask its interrupts
	 * so we don't get stormed by an IRQ we cannot service.
	 */
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2284 
2285 static inline struct
2286 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2287 {
2288 	u8 tx_head;
2289 
2290 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2291 
2292 	return &tx_ring->obj[tx_head];
2293 }
2294 
/* Convert a Classical CAN or CAN-FD skb into the controller's
 * hardware TX object layout inside tx_obj's load buffer, and set the
 * total length of the SPI transfer.
 *
 * @priv:   controller private data; its CRC_TX quirk selects whether
 *          the load buffer uses the CRC'ed SPI command framing
 * @tx_obj: destination TX object
 * @skb:    outgoing frame
 * @seq:    sequence number placed in the object's flags field
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	/* Split an extended (29 bit) CAN ID into the controller's
	 * SID/EID fields; standard IDs map to SID only.
	 */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	/* RTR frames carry no payload; otherwise round the length up
	 * to the next valid CAN-FD length for the padding logic below.
	 */
	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* Pick the CRC or no-CRC variant of the load buffer layout. */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2398 
/* Queue the prepared TX object's SPI message asynchronously.
 * Returns the spi_async() result; the transfer itself completes
 * later.
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}
2404 
/* Check whether the TX ring can take another frame.
 *
 * Returns false immediately if there is free space. Otherwise the
 * netdev queue is stopped and tx_free is re-checked after a memory
 * barrier: if space was freed concurrently (e.g. by TX completion),
 * the queue is restarted and false returned, else true ("busy").
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}
2429 
/* ndo_start_xmit callback: convert the skb into a TX object and queue
 * it to the controller via an asynchronous SPI transfer. The queue is
 * stopped as soon as the TX FIFO is completely occupied.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	/* tx_ring->head doubles as the sequence number in the object */
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	/* Keep the echo skb until TX completion reports the frame. */
	can_put_echo_skb(skb, ndev, tx_head, 0);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	/* NOTE(review): on spi_async() failure the frame is only
	 * logged; the echo skb and ring head are not rolled back here
	 * - confirm cleanup happens elsewhere.
	 */
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2467 
/* ndo_open callback: take a runtime PM reference, allocate the
 * rings, enable the transceiver, start the chip, enable RX offload,
 * request the threaded IRQ and finally enable the controller's
 * interrupts and the TX queue. The error path unwinds in reverse
 * order via gotos.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	can_rx_offload_enable(&priv->offload);

	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	/* NOTE(review): chip_stop() is also reached for failures that
	 * occur before chip_start() - presumably safe on a not-started
	 * chip; confirm.
	 */
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2528 
/* ndo_stop callback: reverse of mcp251xfd_open(). Stops the queue,
 * masks and frees the IRQ (ndev->irq was set to spi->irq in probe),
 * disables RX offload, stops chip and transceiver, frees the rings
 * and drops the runtime PM reference taken in open().
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2546 
/* Netdev callbacks; MTU changes are delegated to the CAN core's
 * can_change_mtu().
 */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2553 
2554 static void
2555 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2556 {
2557 	const struct spi_device *spi = priv->spi;
2558 	const struct spi_controller *ctlr = spi->controller;
2559 
2560 	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2561 		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2562 }
2563 
/* Autodetect the chip model via the OSC_LPMEN bit (writable only on
 * the MCP2518FD), fix up priv->devtype_data if firmware named a
 * different model, re-apply controller quirks and re-init the regmap
 * for the detected model.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	/* NOTE(review): OSC_LPMEN stays set here - presumably
	 * overwritten by later chip configuration; confirm.
	 */
	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Only warn when firmware named a concrete model that differs
	 * from what was detected; the generic "251X" entry is silent.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2604 
2605 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
2606 {
2607 	int err, rx_pending;
2608 
2609 	if (!priv->rx_int)
2610 		return 0;
2611 
2612 	err = mcp251xfd_chip_rx_int_enable(priv);
2613 	if (err)
2614 		return err;
2615 
2616 	/* Check if RX_INT is properly working. The RX_INT should not
2617 	 * be active after a softreset.
2618 	 */
2619 	rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2620 
2621 	err = mcp251xfd_chip_rx_int_disable(priv);
2622 	if (err)
2623 		return err;
2624 
2625 	if (!rx_pending)
2626 		return 0;
2627 
2628 	netdev_info(priv->ndev,
2629 		    "RX_INT active after softreset, disabling RX_INT support.");
2630 	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
2631 	priv->rx_int = NULL;
2632 
2633 	return 0;
2634 }
2635 
2636 static int
2637 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2638 			      u32 *dev_id, u32 *effective_speed_hz)
2639 {
2640 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2641 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2642 	struct spi_transfer xfer[2] = { };
2643 	int err;
2644 
2645 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2646 	if (!buf_rx)
2647 		return -ENOMEM;
2648 
2649 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2650 	if (!buf_tx) {
2651 		err = -ENOMEM;
2652 		goto out_kfree_buf_rx;
2653 	}
2654 
2655 	xfer[0].tx_buf = buf_tx;
2656 	xfer[0].len = sizeof(buf_tx->cmd);
2657 	xfer[1].rx_buf = buf_rx->data;
2658 	xfer[1].len = sizeof(dev_id);
2659 
2660 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2661 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2662 	if (err)
2663 		goto out_kfree_buf_tx;
2664 
2665 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2666 	*effective_speed_hz = xfer->effective_speed_hz;
2667 
2668  out_kfree_buf_tx:
2669 	kfree(buf_tx);
2670  out_kfree_buf_rx:
2671 	kfree(buf_rx);
2672 
2673 	return 0;
2674 }
2675 
/* Evaluates to '+' if the given quirk is set in the device's quirk
 * mask, '-' otherwise. Relies on a variable named "priv" being in
 * scope at the expansion site; used for the probe summary message.
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2678 
/* Final step of registration: read the DEVID register and log a
 * one-line summary with model, silicon revision, active quirks and
 * the CAN clock, original/limited/effective SPI frequencies
 * (printed as MHz with two decimals).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2713 
/* One-time registration at probe time: power up clocks/VDD, set up
 * runtime PM, softreset and autodetect the chip, verify the RX_INT
 * line, register the CAN netdev, log the probe summary and finally
 * put the controller to sleep so runtime PM can gate clocks and
 * supplies again. The error path unwinds via gotos in reverse order.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV: no chip responding, skip putting it to sleep. */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2777 
/* Counterpart of mcp251xfd_register(): unregister the CAN netdev,
 * balance the runtime PM usage counter (get_sync + put_noidle keeps
 * the device active while we disable clocks/VDD ourselves) and
 * disable runtime PM.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2789 
/* Device tree match table; .data selects the per-model devtype/quirk
 * set. The generic "mcp251xfd" compatible requests model autodetection.
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2805 
/* Non-DT SPI device ID table; driver_data carries the same per-model
 * devtype/quirk set used in the probe fallback path.
 */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2821 
2822 static int mcp251xfd_probe(struct spi_device *spi)
2823 {
2824 	const void *match;
2825 	struct net_device *ndev;
2826 	struct mcp251xfd_priv *priv;
2827 	struct gpio_desc *rx_int;
2828 	struct regulator *reg_vdd, *reg_xceiver;
2829 	struct clk *clk;
2830 	u32 freq;
2831 	int err;
2832 
2833 	if (!spi->irq)
2834 		return dev_err_probe(&spi->dev, -ENXIO,
2835 				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
2836 
2837 	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2838 					 GPIOD_IN);
2839 	if (IS_ERR(rx_int))
2840 		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
2841 				     "Failed to get RX-INT!\n");
2842 
2843 	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2844 	if (PTR_ERR(reg_vdd) == -ENODEV)
2845 		reg_vdd = NULL;
2846 	else if (IS_ERR(reg_vdd))
2847 		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
2848 				     "Failed to get VDD regulator!\n");
2849 
2850 	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2851 	if (PTR_ERR(reg_xceiver) == -ENODEV)
2852 		reg_xceiver = NULL;
2853 	else if (IS_ERR(reg_xceiver))
2854 		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
2855 				     "Failed to get Transceiver regulator!\n");
2856 
2857 	clk = devm_clk_get(&spi->dev, NULL);
2858 	if (IS_ERR(clk))
2859 		dev_err_probe(&spi->dev, PTR_ERR(clk),
2860 			      "Failed to get Oscillator (clock)!\n");
2861 	freq = clk_get_rate(clk);
2862 
2863 	/* Sanity check */
2864 	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2865 	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2866 		dev_err(&spi->dev,
2867 			"Oscillator frequency (%u Hz) is too low or high.\n",
2868 			freq);
2869 		return -ERANGE;
2870 	}
2871 
2872 	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2873 		dev_err(&spi->dev,
2874 			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2875 			freq);
2876 		return -ERANGE;
2877 	}
2878 
2879 	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2880 			    MCP251XFD_TX_OBJ_NUM_MAX);
2881 	if (!ndev)
2882 		return -ENOMEM;
2883 
2884 	SET_NETDEV_DEV(ndev, &spi->dev);
2885 
2886 	ndev->netdev_ops = &mcp251xfd_netdev_ops;
2887 	ndev->irq = spi->irq;
2888 	ndev->flags |= IFF_ECHO;
2889 
2890 	priv = netdev_priv(ndev);
2891 	spi_set_drvdata(spi, priv);
2892 	priv->can.clock.freq = freq;
2893 	priv->can.do_set_mode = mcp251xfd_set_mode;
2894 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2895 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2896 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2897 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
2898 		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
2899 		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
2900 		CAN_CTRLMODE_CC_LEN8_DLC;
2901 	priv->ndev = ndev;
2902 	priv->spi = spi;
2903 	priv->rx_int = rx_int;
2904 	priv->clk = clk;
2905 	priv->reg_vdd = reg_vdd;
2906 	priv->reg_xceiver = reg_xceiver;
2907 
2908 	match = device_get_match_data(&spi->dev);
2909 	if (match)
2910 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2911 	else
2912 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2913 			spi_get_device_id(spi)->driver_data;
2914 
2915 	/* Errata Reference:
2916 	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
2917 	 *
2918 	 * The SPI can write corrupted data to the RAM at fast SPI
2919 	 * speeds:
2920 	 *
2921 	 * Simultaneous activity on the CAN bus while writing data to
2922 	 * RAM via the SPI interface, with high SCK frequency, can
2923 	 * lead to corrupted data being written to RAM.
2924 	 *
2925 	 * Fix/Work Around:
2926 	 * Ensure that FSCK is less than or equal to 0.85 *
2927 	 * (FSYSCLK/2).
2928 	 *
2929 	 * Known good combinations are:
2930 	 *
2931 	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	config
2932 	 *
2933 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2934 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2935 	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2936 	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2937 	 * 2518	40 MHz	fsl,imx6dl		fsl,imx51-ecspi		15000000 Hz	 75.00%	 30000000 Hz	default
2938 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2939 	 *
2940 	 */
2941 	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2942 	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2943 	spi->bits_per_word = 8;
2944 	spi->rt = true;
2945 	err = spi_setup(spi);
2946 	if (err)
2947 		goto out_free_candev;
2948 
2949 	err = mcp251xfd_regmap_init(priv);
2950 	if (err)
2951 		goto out_free_candev;
2952 
2953 	err = can_rx_offload_add_manual(ndev, &priv->offload,
2954 					MCP251XFD_NAPI_WEIGHT);
2955 	if (err)
2956 		goto out_free_candev;
2957 
2958 	err = mcp251xfd_register(priv);
2959 	if (err)
2960 		goto out_free_candev;
2961 
2962 	return 0;
2963 
2964  out_free_candev:
2965 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
2966 
2967 	free_candev(ndev);
2968 
2969 	return err;
2970 }
2971 
/* SPI remove callback: tear down in reverse probe order and restore
 * the SPI device's original maximum clock speed.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
2984 
/* Runtime PM suspend: gate the clocks and switch off the supplies. */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}
2991 
/* Runtime PM resume: re-enable clocks and supplies. */
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}
2998 
/* Only runtime PM callbacks are provided; no system sleep handlers. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
3003 
/* SPI driver glue: probe/remove, runtime PM ops, and both the DT and
 * legacy SPI ID match tables.
 */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
/* Generate the usual module init/exit boilerplate for an SPI driver. */
module_spi_driver(mcp251xfd_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
MODULE_LICENSE("GPL v2");
3019