1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020 Pengutronix,
6 //                          Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/pm_runtime.h>
23 
24 #include <asm/unaligned.h>
25 
26 #include "mcp251xfd.h"
27 
28 #define DEVICE_NAME "mcp251xfd"
29 
/* MCP2517FD: needs all common quirks (register CRC, RX/TX CRC, ECC)
 * plus MCP251XFD_QUIRK_MAB_NO_WARN, which is only set for this model.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
36 
/* MCP2518FD: common quirks only (no MAB_NO_WARN, unlike the MCP2517FD). */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
42 
43 /* Autodetect model, start with CRC enabled. */
/* Autodetect model, start with CRC enabled. Same quirk set as the
 * MCP2518FD until the concrete model has been probed.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
49 
/* Nominal (arbitration phase) bit-timing limits advertised to the CAN core. */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
61 
/* Data phase (CAN-FD) bit-timing limits; narrower segments than nominal. */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
73 
74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
75 {
76 	switch (model) {
77 	case MCP251XFD_MODEL_MCP2517FD:
78 		return "MCP2517FD";
79 	case MCP251XFD_MODEL_MCP2518FD:
80 		return "MCP2518FD";
81 	case MCP251XFD_MODEL_MCP251XFD:
82 		return "MCP251xFD";
83 	}
84 
85 	return "<unknown>";
86 }
87 
/* Convenience wrapper: model name for the device described by @priv. */
static inline const char *
mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_get_model_str(priv->devtype_data.model);
}
93 
94 static const char *mcp251xfd_get_mode_str(const u8 mode)
95 {
96 	switch (mode) {
97 	case MCP251XFD_REG_CON_MODE_MIXED:
98 		return "Mixed (CAN FD/CAN 2.0)";
99 	case MCP251XFD_REG_CON_MODE_SLEEP:
100 		return "Sleep";
101 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
102 		return "Internal Loopback";
103 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
104 		return "Listen Only";
105 	case MCP251XFD_REG_CON_MODE_CONFIG:
106 		return "Configuration";
107 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
108 		return "External Loopback";
109 	case MCP251XFD_REG_CON_MODE_CAN2_0:
110 		return "CAN 2.0";
111 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
112 		return "Restricted Operation";
113 	}
114 
115 	return "<unknown>";
116 }
117 
118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
119 {
120 	if (!priv->reg_vdd)
121 		return 0;
122 
123 	return regulator_enable(priv->reg_vdd);
124 }
125 
126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
127 {
128 	if (!priv->reg_vdd)
129 		return 0;
130 
131 	return regulator_disable(priv->reg_vdd);
132 }
133 
134 static inline int
135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 {
137 	if (!priv->reg_xceiver)
138 		return 0;
139 
140 	return regulator_enable(priv->reg_xceiver);
141 }
142 
143 static inline int
144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 {
146 	if (!priv->reg_xceiver)
147 		return 0;
148 
149 	return regulator_disable(priv->reg_xceiver);
150 }
151 
152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
153 {
154 	int err;
155 
156 	err = clk_prepare_enable(priv->clk);
157 	if (err)
158 		return err;
159 
160 	err = mcp251xfd_vdd_enable(priv);
161 	if (err)
162 		clk_disable_unprepare(priv->clk);
163 
164 	/* Wait for oscillator stabilisation time after power up */
165 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
166 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
167 
168 	return err;
169 }
170 
171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
172 {
173 	int err;
174 
175 	err = mcp251xfd_vdd_disable(priv);
176 	if (err)
177 		return err;
178 
179 	clk_disable_unprepare(priv->clk);
180 
181 	return 0;
182 }
183 
/* Prepare a SPI write command for the bytes of @reg selected by @mask.
 *
 * Only the bytes covered by @mask are transferred: the write starts at
 * the first set byte and covers through the last set byte. When the
 * MCP251XFD_QUIRK_CRC_REG quirk is active, the command is framed as a
 * CRC write (length field in the command, CRC16 appended after the
 * data).
 *
 * Returns the total number of bytes to transfer (command + data
 * [+ CRC]).
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Byte span of the register actually touched by @mask. */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	/* Start the write at the first masked byte; shift the value so
	 * that its relevant bytes line up with the copied span.
	 */
	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
219 
220 static inline int
221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
222 				 u8 *tef_tail)
223 {
224 	u32 tef_ua;
225 	int err;
226 
227 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
228 	if (err)
229 		return err;
230 
231 	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
232 
233 	return 0;
234 }
235 
236 static inline int
237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
238 				u8 *tx_tail)
239 {
240 	u32 fifo_sta;
241 	int err;
242 
243 	err = regmap_read(priv->map_reg,
244 			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
245 			  &fifo_sta);
246 	if (err)
247 		return err;
248 
249 	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
250 
251 	return 0;
252 }
253 
254 static inline int
255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
256 				const struct mcp251xfd_rx_ring *ring,
257 				u8 *rx_head)
258 {
259 	u32 fifo_sta;
260 	int err;
261 
262 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
263 			  &fifo_sta);
264 	if (err)
265 		return err;
266 
267 	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
268 
269 	return 0;
270 }
271 
272 static inline int
273 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
274 				const struct mcp251xfd_rx_ring *ring,
275 				u8 *rx_tail)
276 {
277 	u32 fifo_ua;
278 	int err;
279 
280 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
281 			  &fifo_ua);
282 	if (err)
283 		return err;
284 
285 	fifo_ua -= ring->base - MCP251XFD_RAM_START;
286 	*rx_tail = fifo_ua / ring->obj_size;
287 
288 	return 0;
289 }
290 
/* Initialize one TX object: pre-build the SPI command header and the
 * two-transfer SPI message (FIFO load followed by "request to send")
 * so the hot TX path only has to fill in the payload and length.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	/* First transfer: load the TX object into the chip's FIFO RAM.
	 * cs_change with a zero delay toggles chip select between the
	 * two transfers of this message.
	 */
	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
326 
/* Initialize all rings (TEF, TX, RX): reset head/tail pointers, lay
 * the rings out back-to-back in the controller's RAM (TEF objects,
 * then TX objects, then the RX rings), and pre-build the SPI buffers
 * and transfers used to increment the hardware FIFO pointers.
 *
 * Must be called with the rings already allocated
 * (mcp251xfd_ring_alloc()).
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	netdev_reset_queue(priv->ndev);

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	/* All UINC transfers share the one prepared buffer; cs_change
	 * toggles chip select between chained transfers.
	 */
	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		struct spi_transfer *xfer;

		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects start right after the TEF objects in RAM. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* First RX ring follows the TX objects, each further
		 * ring follows its predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			struct spi_transfer *xfer;

			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}
	}
}
413 
414 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
415 {
416 	int i;
417 
418 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
419 		kfree(priv->rx[i]);
420 		priv->rx[i] = NULL;
421 	}
422 }
423 
/* Size and allocate the rings to fit the controller's RAM.
 *
 * Object sizes depend on the mode: CAN-FD (and listen-only, which
 * works like FD mode) uses 64-byte-payload objects, Classical CAN uses
 * 8-byte-payload objects. After reserving TEF + TX space, the
 * remaining RAM is handed out to RX rings, each sized to the largest
 * power of two that still fits (capped at MCP251XFD_RX_OBJ_NUM_MAX).
 *
 * Returns 0 on success, -ENOMEM on allocation failure (already
 * allocated rings are freed).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* Each TX object is paired with a TEF object. */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Largest power of two that fits in the remaining RAM,
		 * capped at the per-ring maximum.
		 */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
492 
493 static inline int
494 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
495 {
496 	u32 val;
497 	int err;
498 
499 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
500 	if (err)
501 		return err;
502 
503 	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
504 
505 	return 0;
506 }
507 
/* Request operation mode @mode_req via the REQOP field of the CON
 * register and, unless @nowait is set or Sleep Mode is requested, poll
 * OPMOD until the controller has actually entered the mode.
 *
 * Returns 0 on success, a negative error code on I/O failure or
 * timeout (the failed target/actual modes are logged).
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	/* Don't poll for Sleep Mode — presumably the chip stops
	 * responding once asleep; NOTE(review): confirm against the
	 * datasheet.
	 */
	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
541 
/* Set operation mode and wait until the controller has entered it. */
static inline int
mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
548 
/* Request an operation mode without waiting for the switch to finish. */
static inline int
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
			       const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
555 
556 static inline bool mcp251xfd_osc_invalid(u32 reg)
557 {
558 	return reg == 0x0 || reg == 0xffffffff;
559 }
560 
/* Power up the controller's oscillator and wait for it to become
 * ready.
 *
 * Returns 0 on success, -ENODEV if the OSC register reads as an
 * invalid value (chip not responding), -ETIMEDOUT if the "Oscillator
 * Ready" bit does not come up in time, or another negative error from
 * the register access.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	/* Check for a dead chip first: an all-zeros/all-ones read is
	 * reported as -ENODEV even if the poll also timed out.
	 */
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	}

	return err;
}
603 
/* Issue a SPI soft-reset command to the controller.
 *
 * The chip is first woken up (clock enabled) and put into Config Mode,
 * then the raw reset command is sent.
 */
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
{
	const __be16 cmd = mcp251xfd_cmd_reset();
	int err;

	/* The Set Mode and SPI Reset command only seems to works if
	 * the controller is not in Sleep Mode.
	 */
	err = mcp251xfd_chip_clock_enable(priv);
	if (err)
		return err;

	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
	if (err)
		return err;

	/* spi_write_then_read() works with non DMA-safe buffers */
	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
}
623 
/* Verify that a soft reset actually took effect: the controller must
 * be in Config Mode and the OSC register must hold its reset defaults.
 *
 * Returns 0 on success, -ETIMEDOUT when the chip state does not match
 * (caller retries the reset), or a negative error from register
 * access.
 */
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference;
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
		netdev_info(priv->ndev,
			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
			    mcp251xfd_get_mode_str(mode), mode);
		return -ETIMEDOUT;
	}

	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);

	/* check reset defaults of OSC reg */
	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc != osc_reference) {
		netdev_info(priv->ndev,
			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
			    osc, osc_reference);
		return -ETIMEDOUT;
	}

	return 0;
}
659 
/* Soft-reset the controller, retrying up to
 * MCP251XFD_SOFTRESET_RETRIES_MAX times on timeout.
 *
 * A -ETIMEDOUT from either the reset or the post-reset check triggers
 * a retry; any other error aborts immediately. Returns 0 on success or
 * the last error after the retries are exhausted.
 */
static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
{
	int err, i;

	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
		if (i)
			netdev_info(priv->ndev,
				    "Retrying to reset controller.\n");

		err = mcp251xfd_chip_softreset_do(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		err = mcp251xfd_chip_softreset_check(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		return 0;
	}

	return err;
}
686 
/* Configure the oscillator (Low Power Mode on disable, default clock
 * output divisor) and enable the Time Base Counter with prescaler 1.
 */
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
	u32 osc;
	int err;

	/* Activate Low Power Mode on Oscillator Disable. This only
	 * works on the MCP2518FD. The MCP2517FD will go into normal
	 * Sleep Mode instead.
	 */
	osc = MCP251XFD_REG_OSC_LPMEN |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Set Time Base Counter Prescaler to 1.
	 *
	 * This means an overflow of the 32 bit Time Base Counter
	 * register at 40 MHz every 107 seconds.
	 */
	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
			    MCP251XFD_REG_TSCON_TBCEN);
}
711 
/* Program the CAN control register and the nominal (and, for CAN-FD,
 * data) bit timing plus Transmitter Delay Compensation from the values
 * computed by the CAN core.
 *
 * Must be called while the controller is in Config Mode; the written
 * CON value itself requests Config Mode.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only disabled for non-ISO CAN-FD. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time: register fields hold value minus one. */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	/* Data bit timing and TDC only apply in CAN-FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation: offset clamped to the
	 * register's signed range, measurement mode set to auto.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
789 
/* Configure the chip's GPIO pins so PIN1 acts as the RX interrupt
 * line; no-op success when no rx_int line was probed.
 */
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input/RX Interrupt
	 *
	 * PIN1 must be Input, otherwise there is a glitch on the
	 * rx-INT line. It happens between setting the PIN as output
	 * (in the first byte of the SPI transfer) and configuring the
	 * PIN as interrupt (in the last byte of the SPI transfer).
	 */
	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
		MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
810 
/* Revert both GPIO pins to plain inputs (RX interrupt disabled);
 * no-op success when no rx_int line was probed.
 */
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input
	 */
	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
826 
827 static int
828 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
829 				const struct mcp251xfd_rx_ring *ring)
830 {
831 	u32 fifo_con;
832 
833 	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
834 	 *
835 	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
836 	 * generate a RXOVIF, use this to properly detect RX MAB
837 	 * overflows.
838 	 */
839 	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
840 			      ring->obj_num - 1) |
841 		MCP251XFD_REG_FIFOCON_RXTSEN |
842 		MCP251XFD_REG_FIFOCON_RXOVIE |
843 		MCP251XFD_REG_FIFOCON_TFNRFNIE;
844 
845 	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
846 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
847 				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
848 	else
849 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
850 				       MCP251XFD_REG_FIFOCON_PLSIZE_8);
851 
852 	return regmap_write(priv->map_reg,
853 			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
854 }
855 
856 static int
857 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
858 				  const struct mcp251xfd_rx_ring *ring)
859 {
860 	u32 fltcon;
861 
862 	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
863 		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
864 
865 	return regmap_update_bits(priv->map_reg,
866 				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
867 				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
868 				  fltcon);
869 }
870 
/* Program all hardware FIFOs: the TEF, the TX FIFO (payload size and
 * retransmission policy depend on ctrlmode) and every RX FIFO with its
 * acceptance filter.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF: one event per TX object, timestamping and interrupts
	 * enabled.
	 */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* 64 byte payload in CAN-FD mode (listen-only works like FD
	 * mode), 8 byte payload in Classical CAN mode.
	 */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* One-shot mode limits retransmission attempts. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
928 
/* Initialize RAM ECC: enable it if the chip has the ECC quirk, then
 * zero out the whole controller RAM so that every word has a valid ECC
 * syndrome before first use.
 *
 * Returns 0 on success, -ENOMEM if the temporary zero buffer cannot be
 * allocated, or a negative error from the register access.
 */
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	void *ram;
	u32 val = 0;
	int err;

	/* Clear any recorded ECC error state. */
	ecc->ecc_stat = 0;

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
		val = MCP251XFD_REG_ECCCON_ECCEN;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 MCP251XFD_REG_ECCCON_ECCEN, val);
	if (err)
		return err;

	/* Write zeros over the full RAM in a single raw transfer. */
	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
	if (!ram)
		return -ENOMEM;

	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
			       MCP251XFD_RAM_SIZE);
	kfree(ram);

	return err;
}
956 
957 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
958 {
959 	struct mcp251xfd_ecc *ecc = &priv->ecc;
960 
961 	ecc->ecc_stat = 0;
962 }
963 
964 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
965 {
966 	u8 mode;
967 
968 
969 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
970 		mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
971 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
972 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
973 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
974 		mode = MCP251XFD_REG_CON_MODE_MIXED;
975 	else
976 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
977 
978 	return mode;
979 }
980 
981 static int
982 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
983 				 bool nowait)
984 {
985 	u8 mode;
986 
987 	mode = mcp251xfd_get_normal_mode(priv);
988 
989 	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
990 }
991 
/* Enter normal operation mode and wait for the switch to complete. */
static inline int
mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, false);
}
997 
/* Request normal operation mode without waiting for the switch. */
static inline int
mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, true);
}
1003 
/* Enable the controller's interrupt sources: CRC errors, ECC errors,
 * and the main interrupt enables (bus/system errors, RX overflow, TX
 * attempt, TEF, mode change, RX). Error message frames (IVMIE) are
 * only enabled when berr-reporting was requested.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	/* Single and double ECC error interrupts. */
	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
1034 
1035 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
1036 {
1037 	int err;
1038 	u32 mask;
1039 
1040 	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1041 	if (err)
1042 		return err;
1043 
1044 	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1045 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1046 				 mask, 0x0);
1047 	if (err)
1048 		return err;
1049 
1050 	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
1051 }
1052 
/* Stop the controller: record the new CAN state, mask all interrupts,
 * disable the RX interrupt pin and put the chip into Sleep Mode.
 *
 * The interrupt/RX-int disable errors are deliberately ignored — this
 * is a best-effort teardown; only the final mode switch result is
 * returned.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1062 
1063 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1064 {
1065 	int err;
1066 
1067 	err = mcp251xfd_chip_softreset(priv);
1068 	if (err)
1069 		goto out_chip_stop;
1070 
1071 	err = mcp251xfd_chip_clock_init(priv);
1072 	if (err)
1073 		goto out_chip_stop;
1074 
1075 	err = mcp251xfd_set_bittiming(priv);
1076 	if (err)
1077 		goto out_chip_stop;
1078 
1079 	err = mcp251xfd_chip_rx_int_enable(priv);
1080 	if (err)
1081 		return err;
1082 
1083 	err = mcp251xfd_chip_ecc_init(priv);
1084 	if (err)
1085 		goto out_chip_stop;
1086 
1087 	mcp251xfd_ring_init(priv);
1088 
1089 	err = mcp251xfd_chip_fifo_init(priv);
1090 	if (err)
1091 		goto out_chip_stop;
1092 
1093 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1094 
1095 	err = mcp251xfd_chip_set_normal_mode(priv);
1096 	if (err)
1097 		goto out_chip_stop;
1098 
1099 	return 0;
1100 
1101  out_chip_stop:
1102 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1103 
1104 	return err;
1105 }
1106 
1107 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1108 {
1109 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
1110 	int err;
1111 
1112 	switch (mode) {
1113 	case CAN_MODE_START:
1114 		err = mcp251xfd_chip_start(priv);
1115 		if (err)
1116 			return err;
1117 
1118 		err = mcp251xfd_chip_interrupts_enable(priv);
1119 		if (err) {
1120 			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1121 			return err;
1122 		}
1123 
1124 		netif_wake_queue(ndev);
1125 		break;
1126 
1127 	default:
1128 		return -EOPNOTSUPP;
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1135 					struct can_berr_counter *bec)
1136 {
1137 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1138 	u32 trec;
1139 	int err;
1140 
1141 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1142 	if (err)
1143 		return err;
1144 
1145 	if (trec & MCP251XFD_REG_TREC_TXBO)
1146 		bec->txerr = 256;
1147 	else
1148 		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1149 	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1150 
1151 	return 0;
1152 }
1153 
/* .do_get_berr_counter callback of the CAN framework.
 *
 * Returns the chip's error counters, without touching the hardware
 * when it may be powered down (interface down, or Bus Off where the
 * values saved in priv->bec are used instead).
 */
static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
				      struct can_berr_counter *bec)
{
	const struct mcp251xfd_priv *priv = netdev_priv(ndev);

	/* Avoid waking up the controller if the interface is down */
	if (!(ndev->flags & IFF_UP))
		return 0;

	/* The controller is powered down during Bus Off, use saved
	 * bec values.
	 */
	if (priv->can.state == CAN_STATE_BUS_OFF) {
		*bec = priv->bec;
		return 0;
	}

	return __mcp251xfd_get_berr_counter(ndev, bec);
}
1173 
1174 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1175 {
1176 	u8 tef_tail_chip, tef_tail;
1177 	int err;
1178 
1179 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1180 		return 0;
1181 
1182 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1183 	if (err)
1184 		return err;
1185 
1186 	tef_tail = mcp251xfd_get_tef_tail(priv);
1187 	if (tef_tail_chip != tef_tail) {
1188 		netdev_err(priv->ndev,
1189 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1190 			   tef_tail_chip, tef_tail);
1191 		return -EILSEQ;
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int
1198 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1199 			const struct mcp251xfd_rx_ring *ring)
1200 {
1201 	u8 rx_tail_chip, rx_tail;
1202 	int err;
1203 
1204 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1205 		return 0;
1206 
1207 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1208 	if (err)
1209 		return err;
1210 
1211 	rx_tail = mcp251xfd_get_rx_tail(ring);
1212 	if (rx_tail_chip != rx_tail) {
1213 		netdev_err(priv->ndev,
1214 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1215 			   rx_tail_chip, rx_tail);
1216 		return -EILSEQ;
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 static int
1223 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1224 {
1225 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1226 	u32 tef_sta;
1227 	int err;
1228 
1229 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1230 	if (err)
1231 		return err;
1232 
1233 	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1234 		netdev_err(priv->ndev,
1235 			   "Transmit Event FIFO buffer overflow.\n");
1236 		return -ENOBUFS;
1237 	}
1238 
1239 	netdev_info(priv->ndev,
1240 		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
1241 		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1242 		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1243 		    "not empty" : "empty",
1244 		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);
1245 
1246 	/* The Sequence Number in the TEF doesn't match our tef_tail. */
1247 	return -EAGAIN;
1248 }
1249 
/* Handle a single TEF object: verify that its sequence number matches
 * the driver's TEF tail, then complete the corresponding echo skb and
 * update the TX statistics.
 *
 * Returns -EAGAIN (via mcp251xfd_handle_tefif_recover()) if the
 * sequence number doesn't match, i.e. the TEF object is old or not
 * yet completed.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
			   unsigned int *frame_len_ptr)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 seq, seq_masked, tef_tail_masked;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	/* Hand the completed frame back to the stack and account it. */
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    mcp251xfd_get_tef_tail(priv),
					    hw_tef_obj->ts,
					    frame_len_ptr);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}
1282 
/* Update the driver's TEF head from the chip's TX FIFO tail.
 *
 * The hardware only exposes a FIFO index (0..obj_num-1), while the
 * driver keeps free-running head/tail counters; the round_down() and
 * wrap-around logic below converts the former into the latter.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1306 
1307 static inline int
1308 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1309 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1310 		       const u8 offset, const u8 len)
1311 {
1312 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1313 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1314 
1315 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1316 	    (offset > tx_ring->obj_num ||
1317 	     len > tx_ring->obj_num ||
1318 	     offset + len > tx_ring->obj_num)) {
1319 		netdev_err(priv->ndev,
1320 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1321 			   tx_ring->obj_num, offset, len);
1322 		return -ERANGE;
1323 	}
1324 
1325 	return regmap_bulk_read(priv->map_rx,
1326 				mcp251xfd_get_tef_obj_addr(offset),
1327 				hw_tef_obj,
1328 				sizeof(*hw_tef_obj) / val_bytes * len);
1329 }
1330 
/* Handle the Transmit Event FIFO interrupt: read all new TEF objects
 * (in up to two linear chunks, as the ring may wrap), complete the
 * corresponding echo skbs, then increment the chip's TEF FIFO tail
 * pointer and wake the TX queue if there is free space again.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	unsigned int total_frame_len = 0;
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	/* "len" is the total number of pending TEF objects, "l" the
	 * number that can be read without wrapping the ring.
	 */
	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	if (l < len) {
		/* The ring wrapped: read the remainder from offset 0. */
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		unsigned int frame_len;

		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave the loop and
		 * let the interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;

		total_frame_len += frame_len;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		struct spi_transfer *last_xfer;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporarily set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;

		tx_ring->tail += len;
		netdev_completed_queue(priv->ndev, len, total_frame_len);

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1419 
/* Update the driver's RX head of the given ring from the chip's RX
 * FIFO head.
 *
 * As in mcp251xfd_tef_ring_update(): the chip only reports a FIFO
 * index, which is folded into the driver's free-running head counter
 * via round_down() plus wrap-around handling.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1443 
/* Convert a hardware RX object into a Classical CAN or CAN-FD frame
 * inside the given skb.
 *
 * Fills the frame's CAN ID (standard or extended), flags (ESI, BRS,
 * RTR), length and - for non-RTR frames - the payload.
 */
static void
mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
			   struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 dlc;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
		u32 sid, eid;

		/* Extended frame: recombine SID and EID into the
		 * 29 bit CAN ID.
		 */
		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);

		cfd->can_id = CAN_EFF_FLAG |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
	} else {
		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
					hw_rx_obj->id);
	}

	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);

	/* CANFD */
	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
			cfd->flags |= CANFD_ESI;

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
			cfd->flags |= CANFD_BRS;

		cfd->len = can_fd_dlc2len(dlc);
	} else {
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
			cfd->can_id |= CAN_RTR_FLAG;

		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
				     priv->can.ctrlmode);
	}

	/* RTR frames carry no payload. */
	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
		memcpy(cfd->data, hw_rx_obj->data, cfd->len);
}
1489 
1490 static int
1491 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1492 			  struct mcp251xfd_rx_ring *ring,
1493 			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1494 {
1495 	struct net_device_stats *stats = &priv->ndev->stats;
1496 	struct sk_buff *skb;
1497 	struct canfd_frame *cfd;
1498 	int err;
1499 
1500 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1501 		skb = alloc_canfd_skb(priv->ndev, &cfd);
1502 	else
1503 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1504 
1505 	if (!skb) {
1506 		stats->rx_dropped++;
1507 		return 0;
1508 	}
1509 
1510 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1511 	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1512 	if (err)
1513 		stats->rx_fifo_errors++;
1514 
1515 	return 0;
1516 }
1517 
1518 static inline int
1519 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1520 		      const struct mcp251xfd_rx_ring *ring,
1521 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1522 		      const u8 offset, const u8 len)
1523 {
1524 	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1525 	int err;
1526 
1527 	err = regmap_bulk_read(priv->map_rx,
1528 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1529 			       hw_rx_obj,
1530 			       len * ring->obj_size / val_bytes);
1531 
1532 	return err;
1533 }
1534 
/* Read and process all pending RX objects of one RX ring.
 *
 * The objects are read in linear (non-wrapping) chunks; after each
 * chunk the chip's RX FIFO tail pointer is incremented 'len' times
 * using the pre-prepared uinc_xfer SPI transfers.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		struct spi_transfer *last_xfer;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporarily set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}
1590 
1591 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1592 {
1593 	struct mcp251xfd_rx_ring *ring;
1594 	int err, n;
1595 
1596 	mcp251xfd_for_each_rx_ring(priv, ring, n) {
1597 		err = mcp251xfd_handle_rxif_ring(priv, ring);
1598 		if (err)
1599 			return err;
1600 	}
1601 
1602 	return 0;
1603 }
1604 
/* Read the chip's free-running Time Base Counter (TBC) register. */
static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
					  u32 *timestamp)
{
	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
}
1610 
1611 static struct sk_buff *
1612 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
1613 			    struct can_frame **cf, u32 *timestamp)
1614 {
1615 	int err;
1616 
1617 	err = mcp251xfd_get_timestamp(priv, timestamp);
1618 	if (err)
1619 		return NULL;
1620 
1621 	return alloc_can_err_skb(priv->ndev, cf);
1622 }
1623 
/* Handle the RX FIFO overflow interrupt: account the error, clear the
 * RXOVIF flag of every affected FIFO and queue a CAN error frame
 * reporting the overflow.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Clear the overflow flag of this FIFO by writing 0. */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1675 
/* TX attempt interrupt: currently only logged, no further handling. */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1682 
/* Handle the Invalid Message interrupt (bus errors): translate the
 * chip's BDIAG1 error flags into a CAN error frame and update the
 * RX/TX error statistics. The error frame is only queued if the skb
 * allocation succeeded; the statistics are updated either way.
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1779 
/* Handle the CAN error interrupt: derive the new CAN state from the
 * chip's TREC register, report a state change via an error frame and,
 * on Bus Off, save the error counters and stop the chip.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	/* Map the TREC flags to the TX state, worst state first. */
	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	/* ... and likewise for the RX state. */
	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1852 
/* Handle the Mode Change interrupt: check whether the controller left
 * the requested operating mode and, if so, request Normal mode again
 * - unless a pending ECC error has to be handled first in
 * mcp251xfd_handle_eccif(), which is signalled via *set_normal_mode.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	/* Expected mode change, nothing to do. */
	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1907 
/* Handle the System Error interrupt. According to errata DS80000792B
 * this is raised on a TX MAB underflow or a RX MAB overflow; both
 * cases are identified by the other interrupt flags snapshotted in
 * priv->regs_status.intf and accounted in the error statistics.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all of them as known system errors.
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all of them as known system errors.
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1985 
/* Recover from a persistent ECC error in TX-RAM: verify that the
 * error hit the TX object about to be (re-)transmitted, re-upload
 * that object into the controller's RAM and request Normal mode to
 * trigger the retransmission.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* Distance of the erroneous object from the chip's TX tail,
	 * modulo the ring size (obj_num is a power of two).
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2034 
/* Handle the ECC interrupt: clear the ECCSTAT interrupt flags, log
 * the error and, if the error is located in TX-RAM and keeps
 * re-occurring, recover by reloading the affected TX object.
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Clear the active interrupt flags by writing 0 to them. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Deferred from mcp251xfd_handle_modif(): request Normal mode
	 * now that the ECC error has been handled.
	 */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2110 
/* Handle the SPI CRC interrupt: clear the CRC interrupt flags and log
 * format or CRC errors of CRC protected write commands.
 */
static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
{
	int err;
	u32 crc;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
	if (err)
		return err;

	/* Clear the active interrupt flags by writing 0 to them. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
				 MCP251XFD_REG_CRC_IF_MASK,
				 ~crc);
	if (err)
		return err;

	if (crc & MCP251XFD_REG_CRC_FERRIF)
		netdev_notice(priv->ndev, "CRC write command format error.\n");
	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
		netdev_notice(priv->ndev,
			      "CRC write error detected. CRC=0x%04lx.\n",
			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));

	return 0;
}
2135 
/* Call mcp251xfd_handle_<irq>() and log an error message if it
 * fails; evaluates to the handler's return value.
 *
 * Implemented as a GNU statement expression; the local _priv avoids
 * evaluating the priv argument more than once.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2148 
/* Threaded IRQ handler.
 *
 * First drains received frames as long as the optional RX_INT GPIO
 * signals pending data, then loops: bulk-read the interrupt status
 * registers, ACK the clearable interrupt flags, and dispatch to the
 * individual mcp251xfd_handle_*() sub-handlers until no enabled
 * interrupt remains pending. On any sub-handler or regmap error the
 * chip's interrupts are disabled and the error is logged.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	/* Fast path: if the RX_INT line is wired up, handle RX frames
	 * without touching the interrupt status registers.
	 */
	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Snapshot the interrupt status registers into
		 * priv->regs_status in one bulk transfer.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only act on interrupts that are both flagged (IF)
		 * and enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			/* set_normal_mode may have been set by the
			 * modif handler above.
			 */
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2294 
2295 static inline struct
2296 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2297 {
2298 	u8 tx_head;
2299 
2300 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2301 
2302 	return &tx_ring->obj[tx_head];
2303 }
2304 
/* Convert a CAN(-FD) skb into the hardware TX object layout inside
 * tx_obj->buf and set up the length of the first SPI transfer,
 * including the SPI command and — if MCP251XFD_QUIRK_CRC_TX is active —
 * the trailing CRC over command and payload.
 *
 * @priv:   driver private data (quirks, ctrlmode)
 * @tx_obj: TX object whose load buffer and xfer[0].len are filled in
 * @skb:    outgoing CAN frame
 * @seq:    sequence number stored in the object's FLAGS field
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	/* Extended (29 bit) vs. standard (11 bit) ID layout. */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	/* RTR frames carry no payload, so only sanitize the length
	 * for data frames.
	 */
	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The hardware object sits at a different offset depending on
	 * whether a CRC command header is prepended.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2408 
/* Asynchronously queue the pre-built SPI message that loads this TX
 * object into the controller. Returns the spi_async() result.
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}
2414 
/* Check whether the TX ring is full. If it looks full, stop the
 * netdev queue, then re-check under a memory barrier: if space became
 * available in the meantime (TX-complete path advanced the tail), the
 * queue is restarted and false is returned. Returns true only if the
 * ring is genuinely full and the queue stays stopped.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}
2439 
/* .ndo_start_xmit: convert the skb into a hardware TX object, advance
 * the ring head, store the echo skb and queue the asynchronous SPI
 * write. Always returns NETDEV_TX_OK except when the ring is full
 * (NETDEV_TX_BUSY); a failed SPI submission is only logged.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	/* Store the echo skb before submitting, the TX complete path
	 * will fetch it by tx_head.
	 */
	frame_len = can_skb_get_frame_len(skb);
	can_put_echo_skb(skb, ndev, tx_head, frame_len);
	netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2480 
/* .ndo_open: power up (runtime PM), allocate rings, enable the
 * transceiver, start the chip, enable rx-offload, request the
 * threaded IRQ and finally enable the chip's interrupts. Unwinds in
 * reverse order via the goto ladder on any failure.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	can_rx_offload_enable(&priv->offload);

	/* Threaded handler only: the handler does sleeping SPI I/O. */
	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2541 
/* .ndo_stop: tear down in the reverse order of mcp251xfd_open().
 * Note: ndev->irq is the same as spi->irq (assigned in probe), so
 * free_irq() matches the request_threaded_irq() in open.
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2559 
/* Network device callbacks. */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2566 
/* Derive quirks from the SPI controller: mark the device as
 * half-duplex if the controller can only do half-duplex transfers.
 */
static void
mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
{
	const struct spi_device *spi = priv->spi;
	const struct spi_controller *ctlr = spi->controller;

	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
}
2576 
/* Autodetect the chip model and fix up priv->devtype_data if the
 * firmware-specified model disagrees with what is detected. Finally
 * re-initializes the regmap so it matches the detected model's quirks.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	/* If the bit stuck, the chip supports Low Power Mode, i.e. it
	 * is a MCP2518FD.
	 */
	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Only warn if the firmware named a concrete model (not the
	 * generic "mcp251xfd") that differs from the detection.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2617 
/* Sanity-check the optional RX_INT GPIO: briefly enable it and check
 * that the line is inactive right after a softreset. If it reads
 * active, the line is considered broken and RX_INT support is
 * disabled (the GPIO is released and priv->rx_int set to NULL).
 */
static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
{
	int err, rx_pending;

	if (!priv->rx_int)
		return 0;

	err = mcp251xfd_chip_rx_int_enable(priv);
	if (err)
		return err;

	/* Check if RX_INT is properly working. The RX_INT should not
	 * be active after a softreset.
	 */
	rx_pending = gpiod_get_value_cansleep(priv->rx_int);

	err = mcp251xfd_chip_rx_int_disable(priv);
	if (err)
		return err;

	if (!rx_pending)
		return 0;

	netdev_info(priv->ndev,
		    "RX_INT active after softreset, disabling RX_INT support.");
	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
	priv->rx_int = NULL;

	return 0;
}
2648 
2649 static int
2650 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2651 			      u32 *dev_id, u32 *effective_speed_hz)
2652 {
2653 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2654 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2655 	struct spi_transfer xfer[2] = { };
2656 	int err;
2657 
2658 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2659 	if (!buf_rx)
2660 		return -ENOMEM;
2661 
2662 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2663 	if (!buf_tx) {
2664 		err = -ENOMEM;
2665 		goto out_kfree_buf_rx;
2666 	}
2667 
2668 	xfer[0].tx_buf = buf_tx;
2669 	xfer[0].len = sizeof(buf_tx->cmd);
2670 	xfer[1].rx_buf = buf_rx->data;
2671 	xfer[1].len = sizeof(dev_id);
2672 
2673 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2674 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2675 	if (err)
2676 		goto out_kfree_buf_tx;
2677 
2678 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2679 	*effective_speed_hz = xfer->effective_speed_hz;
2680 
2681  out_kfree_buf_tx:
2682 	kfree(buf_tx);
2683  out_kfree_buf_rx:
2684 	kfree(buf_rx);
2685 
2686 	return 0;
2687 }
2688 
/* Expand to '+' if the given quirk is set in priv->devtype_data,
 * '-' otherwise. Used for the banner in mcp251xfd_register_done().
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2691 
/* Print a one-line summary banner: model, silicon revision, active
 * quirks and the various clock/SPI frequencies. Returns a negative
 * error code if the DEVID register cannot be read.
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2726 
/* Bring up power/clocks and runtime PM, softreset and autodetect the
 * chip, then register the CAN netdev. On success the chip is put into
 * sleep mode and the runtime PM reference dropped; on failure the
 * goto ladder unwinds exactly what was set up.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means no chip responded: skip the sleep-mode write
	 * and only unwind the runtime PM setup.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2790 
/* Reverse of mcp251xfd_register(): unregister the netdev, balance the
 * runtime PM reference and power down clocks and vdd.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2802 
/* Device tree match table; .data selects the per-model quirks. */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2818 
/* Legacy SPI (non-DT) match table, mirroring the OF table above. */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2834 
2835 static int mcp251xfd_probe(struct spi_device *spi)
2836 {
2837 	const void *match;
2838 	struct net_device *ndev;
2839 	struct mcp251xfd_priv *priv;
2840 	struct gpio_desc *rx_int;
2841 	struct regulator *reg_vdd, *reg_xceiver;
2842 	struct clk *clk;
2843 	u32 freq;
2844 	int err;
2845 
2846 	if (!spi->irq)
2847 		return dev_err_probe(&spi->dev, -ENXIO,
2848 				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
2849 
2850 	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2851 					 GPIOD_IN);
2852 	if (IS_ERR(rx_int))
2853 		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
2854 				     "Failed to get RX-INT!\n");
2855 
2856 	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2857 	if (PTR_ERR(reg_vdd) == -ENODEV)
2858 		reg_vdd = NULL;
2859 	else if (IS_ERR(reg_vdd))
2860 		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
2861 				     "Failed to get VDD regulator!\n");
2862 
2863 	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2864 	if (PTR_ERR(reg_xceiver) == -ENODEV)
2865 		reg_xceiver = NULL;
2866 	else if (IS_ERR(reg_xceiver))
2867 		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
2868 				     "Failed to get Transceiver regulator!\n");
2869 
2870 	clk = devm_clk_get(&spi->dev, NULL);
2871 	if (IS_ERR(clk))
2872 		dev_err_probe(&spi->dev, PTR_ERR(clk),
2873 			      "Failed to get Oscillator (clock)!\n");
2874 	freq = clk_get_rate(clk);
2875 
2876 	/* Sanity check */
2877 	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2878 	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2879 		dev_err(&spi->dev,
2880 			"Oscillator frequency (%u Hz) is too low or high.\n",
2881 			freq);
2882 		return -ERANGE;
2883 	}
2884 
2885 	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2886 		dev_err(&spi->dev,
2887 			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2888 			freq);
2889 		return -ERANGE;
2890 	}
2891 
2892 	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2893 			    MCP251XFD_TX_OBJ_NUM_MAX);
2894 	if (!ndev)
2895 		return -ENOMEM;
2896 
2897 	SET_NETDEV_DEV(ndev, &spi->dev);
2898 
2899 	ndev->netdev_ops = &mcp251xfd_netdev_ops;
2900 	ndev->irq = spi->irq;
2901 	ndev->flags |= IFF_ECHO;
2902 
2903 	priv = netdev_priv(ndev);
2904 	spi_set_drvdata(spi, priv);
2905 	priv->can.clock.freq = freq;
2906 	priv->can.do_set_mode = mcp251xfd_set_mode;
2907 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2908 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2909 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2910 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
2911 		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
2912 		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
2913 		CAN_CTRLMODE_CC_LEN8_DLC;
2914 	priv->ndev = ndev;
2915 	priv->spi = spi;
2916 	priv->rx_int = rx_int;
2917 	priv->clk = clk;
2918 	priv->reg_vdd = reg_vdd;
2919 	priv->reg_xceiver = reg_xceiver;
2920 
2921 	match = device_get_match_data(&spi->dev);
2922 	if (match)
2923 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2924 	else
2925 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2926 			spi_get_device_id(spi)->driver_data;
2927 
2928 	/* Errata Reference:
2929 	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
2930 	 *
2931 	 * The SPI can write corrupted data to the RAM at fast SPI
2932 	 * speeds:
2933 	 *
2934 	 * Simultaneous activity on the CAN bus while writing data to
2935 	 * RAM via the SPI interface, with high SCK frequency, can
2936 	 * lead to corrupted data being written to RAM.
2937 	 *
2938 	 * Fix/Work Around:
2939 	 * Ensure that FSCK is less than or equal to 0.85 *
2940 	 * (FSYSCLK/2).
2941 	 *
2942 	 * Known good combinations are:
2943 	 *
2944 	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	config
2945 	 *
2946 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2947 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	assigned-clocks = <&ccu CLK_SPIx>
2948 	 * 2517	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2949 	 * 2518	40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	default
2950 	 * 2518	40 MHz	fsl,imx6dl		fsl,imx51-ecspi		15000000 Hz	 75.00%	 30000000 Hz	default
2951 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2952 	 *
2953 	 */
2954 	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2955 	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2956 	spi->bits_per_word = 8;
2957 	spi->rt = true;
2958 	err = spi_setup(spi);
2959 	if (err)
2960 		goto out_free_candev;
2961 
2962 	err = mcp251xfd_regmap_init(priv);
2963 	if (err)
2964 		goto out_free_candev;
2965 
2966 	err = can_rx_offload_add_manual(ndev, &priv->offload,
2967 					MCP251XFD_NAPI_WEIGHT);
2968 	if (err)
2969 		goto out_free_candev;
2970 
2971 	err = mcp251xfd_register(priv);
2972 	if (err)
2973 		goto out_free_candev;
2974 
2975 	return 0;
2976 
2977  out_free_candev:
2978 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
2979 
2980 	free_candev(ndev);
2981 
2982 	return err;
2983 }
2984 
/* SPI remove: tear down rx-offload and the netdev, and restore the
 * SPI max speed that probe clamped for the RAM-corruption errata.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
2997 
/* Runtime PM suspend: gate the clocks and regulators. */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}
3004 
/* Runtime PM resume: re-enable the clocks and regulators. */
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}
3011 
/* Runtime PM callbacks only; no system sleep handlers. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
3016 
/* SPI driver registration. */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);
3028 
3029 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
3030 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
3031 MODULE_LICENSE("GPL v2");
3032