xref: /openbmc/linux/drivers/net/can/bxcan.c (revision 7eb1e476)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // bxcan.c - STM32 Basic Extended CAN controller driver
4 //
5 // Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com>
6 //
7 // NOTE: The ST documentation uses the terms master/slave instead of
8 // primary/secondary.
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/bitfield.h>
13 #include <linux/can.h>
14 #include <linux/can/dev.h>
15 #include <linux/can/error.h>
16 #include <linux/can/rx-offload.h>
17 #include <linux/clk.h>
18 #include <linux/ethtool.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/iopoll.h>
22 #include <linux/kernel.h>
23 #include <linux/mfd/syscon.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/platform_device.h>
28 #include <linux/regmap.h>
29 
30 #define BXCAN_NAPI_WEIGHT 3
31 #define BXCAN_TIMEOUT_US 10000
32 
33 #define BXCAN_RX_MB_NUM 2
34 #define BXCAN_TX_MB_NUM 3
35 
36 /* Primary control register (MCR) bits */
37 #define BXCAN_MCR_RESET BIT(15)
38 #define BXCAN_MCR_TTCM BIT(7)
39 #define BXCAN_MCR_ABOM BIT(6)
40 #define BXCAN_MCR_AWUM BIT(5)
41 #define BXCAN_MCR_NART BIT(4)
42 #define BXCAN_MCR_RFLM BIT(3)
43 #define BXCAN_MCR_TXFP BIT(2)
44 #define BXCAN_MCR_SLEEP BIT(1)
45 #define BXCAN_MCR_INRQ BIT(0)
46 
47 /* Primary status register (MSR) bits */
48 #define BXCAN_MSR_ERRI BIT(2)
49 #define BXCAN_MSR_SLAK BIT(1)
50 #define BXCAN_MSR_INAK BIT(0)
51 
52 /* Transmit status register (TSR) bits */
53 #define BXCAN_TSR_RQCP2 BIT(16)
54 #define BXCAN_TSR_RQCP1 BIT(8)
55 #define BXCAN_TSR_RQCP0 BIT(0)
56 
57 /* Receive FIFO 0 register (RF0R) bits */
58 #define BXCAN_RF0R_RFOM0 BIT(5)
59 #define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)
60 
61 /* Interrupt enable register (IER) bits */
62 #define BXCAN_IER_SLKIE BIT(17)
63 #define BXCAN_IER_WKUIE BIT(16)
64 #define BXCAN_IER_ERRIE BIT(15)
65 #define BXCAN_IER_LECIE BIT(11)
66 #define BXCAN_IER_BOFIE BIT(10)
67 #define BXCAN_IER_EPVIE BIT(9)
68 #define BXCAN_IER_EWGIE BIT(8)
69 #define BXCAN_IER_FOVIE1 BIT(6)
70 #define BXCAN_IER_FFIE1 BIT(5)
71 #define BXCAN_IER_FMPIE1 BIT(4)
72 #define BXCAN_IER_FOVIE0 BIT(3)
73 #define BXCAN_IER_FFIE0 BIT(2)
74 #define BXCAN_IER_FMPIE0 BIT(1)
75 #define BXCAN_IER_TMEIE BIT(0)
76 
77 /* Error status register (ESR) bits */
78 #define BXCAN_ESR_REC_MASK GENMASK(31, 24)
79 #define BXCAN_ESR_TEC_MASK GENMASK(23, 16)
80 #define BXCAN_ESR_LEC_MASK GENMASK(6, 4)
81 #define BXCAN_ESR_BOFF BIT(2)
82 #define BXCAN_ESR_EPVF BIT(1)
83 #define BXCAN_ESR_EWGF BIT(0)
84 
85 /* Bit timing register (BTR) bits */
86 #define BXCAN_BTR_SILM BIT(31)
87 #define BXCAN_BTR_LBKM BIT(30)
88 #define BXCAN_BTR_SJW_MASK GENMASK(25, 24)
89 #define BXCAN_BTR_TS2_MASK GENMASK(22, 20)
90 #define BXCAN_BTR_TS1_MASK GENMASK(19, 16)
91 #define BXCAN_BTR_BRP_MASK GENMASK(9, 0)
92 
93 /* TX mailbox identifier register (TIxR, x = 0..2) bits */
94 #define BXCAN_TIxR_STID_MASK GENMASK(31, 21)
95 #define BXCAN_TIxR_EXID_MASK GENMASK(31, 3)
96 #define BXCAN_TIxR_IDE BIT(2)
97 #define BXCAN_TIxR_RTR BIT(1)
98 #define BXCAN_TIxR_TXRQ BIT(0)
99 
/* TX mailbox data length and time stamp register (TDTxR, x = 0..2) bits */
101 #define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)
102 
/* RX FIFO mailbox identifier register (RIxR, x = 0..1) bits */
104 #define BXCAN_RIxR_STID_MASK GENMASK(31, 21)
105 #define BXCAN_RIxR_EXID_MASK GENMASK(31, 3)
106 #define BXCAN_RIxR_IDE BIT(2)
107 #define BXCAN_RIxR_RTR BIT(1)
108 
109 /* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */
110 #define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16)
111 #define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)
112 
113 #define BXCAN_FMR_REG 0x00
114 #define BXCAN_FM1R_REG 0x04
115 #define BXCAN_FS1R_REG 0x0c
116 #define BXCAN_FFA1R_REG 0x14
117 #define BXCAN_FA1R_REG 0x1c
118 #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
119 #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
120 
121 #define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
122 
123 /* Filter primary register (FMR) bits */
124 #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
125 #define BXCAN_FMR_FINIT BIT(0)
126 
/* Last error code (LEC) values as read from the ESR.LEC field (see
 * BXCAN_ESR_LEC_MASK).  The enumerators follow the hardware encoding.
 * BXCAN_LEC_UNUSED is written back by software as a sentinel so that a
 * later non-UNUSED value indicates a fresh bus event.
 */
enum bxcan_lec_code {
	BXCAN_LEC_NO_ERROR = 0,
	BXCAN_LEC_STUFF_ERROR,
	BXCAN_LEC_FORM_ERROR,
	BXCAN_LEC_ACK_ERROR,
	BXCAN_LEC_BIT1_ERROR,
	BXCAN_LEC_BIT0_ERROR,
	BXCAN_LEC_CRC_ERROR,
	BXCAN_LEC_UNUSED
};
137 
/* Instance configuration, selected in probe from the devicetree
 * properties "st,can-primary"/"st,can-secondary".  In the dual-secondary
 * case the controller owns filter bank 14 instead of 0 (see
 * BXCAN_FILTER_ID()).
 */
enum bxcan_cfg {
	BXCAN_CFG_SINGLE = 0,
	BXCAN_CFG_DUAL_PRIMARY,
	BXCAN_CFG_DUAL_SECONDARY
};
143 
/* Structure of a single TX/RX mailbox in the register file */
struct bxcan_mb {
	u32 id;			/* can identifier (TIxR/RIxR layout) */
	u32 dlc;		/* data length control and timestamp (TDTxR/RDTxR) */
	u32 data[2];		/* up to 8 bytes of payload */
};
150 
/* Structure of the hardware registers; field offsets noted per line */
struct bxcan_regs {
	u32 mcr;			/* 0x00 - primary control */
	u32 msr;			/* 0x04 - primary status */
	u32 tsr;			/* 0x08 - transmit status */
	u32 rf0r;			/* 0x0c - FIFO 0 */
	u32 rf1r;			/* 0x10 - FIFO 1 */
	u32 ier;			/* 0x14 - interrupt enable */
	u32 esr;			/* 0x18 - error status */
	u32 btr;			/* 0x1c - bit timing */
	u32 reserved0[88];		/* 0x20 - gap up to the mailboxes */
	struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM];	/* 0x180 - tx mailbox */
	struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM];	/* 0x1b0 - rx mailbox */
};
165 
/* Driver private context, stored in the netdev private area */
struct bxcan_priv {
	struct can_priv can;		/* generic CAN controller state */
	struct can_rx_offload offload;	/* rx-offload (NAPI) context */
	struct device *dev;
	struct net_device *ndev;

	struct bxcan_regs __iomem *regs;	/* controller register file */
	struct regmap *gcan;		/* shared filter-bank registers (syscon) */
	int tx_irq;
	int sce_irq;			/* status change / error interrupt */
	enum bxcan_cfg cfg;		/* single/dual-primary/dual-secondary */
	struct clk *clk;
	spinlock_t rmw_lock;	/* lock for read-modify-write operations */
	unsigned int tx_head;	/* next mailbox to fill (free-running) */
	unsigned int tx_tail;	/* next mailbox to complete (free-running) */
	u32 timestamp;		/* RDTxR.TIME of the last received frame */
};
183 
/* Bit-timing limits of the bxCAN core; the maxima match the widths of
 * the corresponding BTR fields (TS1: 4 bits, TS2: 3 bits, SJW: 2 bits,
 * BRP: 10 bits).
 */
static const struct can_bittiming_const bxcan_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};
195 
196 static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
197 			     u32 clear, u32 set)
198 {
199 	unsigned long flags;
200 	u32 old, val;
201 
202 	spin_lock_irqsave(&priv->rmw_lock, flags);
203 	old = readl(addr);
204 	val = (old & ~clear) | set;
205 	if (val != old)
206 		writel(val, addr);
207 
208 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
209 }
210 
211 static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
212 {
213 	unsigned int fid = BXCAN_FILTER_ID(cfg);
214 	u32 fmask = BIT(fid);
215 
216 	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
217 }
218 
/* Configure and activate the acceptance filter bank owned by this
 * instance.  The whole sequence runs with FMR.FINIT set, as the filter
 * registers are only writable in filter initialization mode.
 */
static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
{
	unsigned int fid = BXCAN_FILTER_ID(cfg);
	u32 fmask = BIT(fid);

	/* Filter settings:
	 *
	 * Accept all messages.
	 * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier
	 * mask mode with 32 bits width.
	 */

	/* Enter filter initialization mode and assign filters to CAN
	 * controllers.
	 */
	regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
			   BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT,
			   FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) |
			   BXCAN_FMR_FINIT);

	/* Deactivate filter while it is being reconfigured */
	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);

	/* Two 32-bit registers in identifier mask mode */
	regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0);

	/* Single 32-bit scale configuration */
	regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask);

	/* Assign filter to FIFO 0 */
	regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0);

	/* Accept all messages: identifier and mask both zero */
	regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0);
	regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0);

	/* Activate filter */
	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask);

	/* Exit filter initialization mode */
	regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0);
}
261 
262 static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv)
263 {
264 	return priv->tx_head % BXCAN_TX_MB_NUM;
265 }
266 
267 static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv)
268 {
269 	return priv->tx_tail % BXCAN_TX_MB_NUM;
270 }
271 
272 static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv)
273 {
274 	return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail);
275 }
276 
/* Return true (and stop the TX queue) if no TX mailbox is free.
 *
 * The stop/barrier/re-check sequence closes the race against the TX
 * completion interrupt: a mailbox may be freed between the first check
 * and netif_stop_queue(), in which case the queue is restarted and
 * transmission may proceed.
 */
static bool bxcan_tx_busy(const struct bxcan_priv *priv)
{
	if (bxcan_get_tx_free(priv) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (bxcan_get_tx_free(priv) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   priv->tx_head, priv->tx_tail,
			   priv->tx_head - priv->tx_tail);

		return true;
	}

	/* A slot was freed concurrently; undo the stop */
	netif_start_queue(priv->ndev);

	return false;
}
300 
301 static int bxcan_chip_softreset(struct bxcan_priv *priv)
302 {
303 	struct bxcan_regs __iomem *regs = priv->regs;
304 	u32 value;
305 
306 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET);
307 	return readx_poll_timeout(readl, &regs->msr, value,
308 				  value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
309 				  USEC_PER_SEC);
310 }
311 
312 static int bxcan_enter_init_mode(struct bxcan_priv *priv)
313 {
314 	struct bxcan_regs __iomem *regs = priv->regs;
315 	u32 value;
316 
317 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ);
318 	return readx_poll_timeout(readl, &regs->msr, value,
319 				  value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US,
320 				  USEC_PER_SEC);
321 }
322 
323 static int bxcan_leave_init_mode(struct bxcan_priv *priv)
324 {
325 	struct bxcan_regs __iomem *regs = priv->regs;
326 	u32 value;
327 
328 	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0);
329 	return readx_poll_timeout(readl, &regs->msr, value,
330 				  !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US,
331 				  USEC_PER_SEC);
332 }
333 
334 static int bxcan_enter_sleep_mode(struct bxcan_priv *priv)
335 {
336 	struct bxcan_regs __iomem *regs = priv->regs;
337 	u32 value;
338 
339 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP);
340 	return readx_poll_timeout(readl, &regs->msr, value,
341 				  value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
342 				  USEC_PER_SEC);
343 }
344 
345 static int bxcan_leave_sleep_mode(struct bxcan_priv *priv)
346 {
347 	struct bxcan_regs __iomem *regs = priv->regs;
348 	u32 value;
349 
350 	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0);
351 	return readx_poll_timeout(readl, &regs->msr, value,
352 				  !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US,
353 				  USEC_PER_SEC);
354 }
355 
/* The rx-offload context is embedded in bxcan_priv; map it back */
static inline
struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct bxcan_priv, offload);
}
361 
/* rx-offload mailbox_read callback: fetch one frame from RX FIFO 0.
 *
 * Returns NULL when the FIFO is empty, ERR_PTR(-ENOBUFS) when @drop is
 * set, ERR_PTR(-ENOMEM) on skb allocation failure, or the filled skb.
 * In every case the FIFO output mailbox is released at the end by
 * setting RF0R.RFOM0.
 *
 * NOTE(review): the @timestamp output parameter is never written; the
 * driver instead caches the RDTxR.TIME value in priv->timestamp for use
 * when queueing error frames — confirm this is intended with the FIFO
 * (non-sorting) offload mode.
 */
static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload,
					  unsigned int mbxno, u32 *timestamp,
					  bool drop)
{
	struct bxcan_priv *priv = rx_offload_to_priv(offload);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0];
	struct sk_buff *skb = NULL;
	struct can_frame *cf;
	u32 rf0r, id, dlc;

	rf0r = readl(&regs->rf0r);
	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	/* No message pending in FIFO 0 */
	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
		goto mark_as_read;

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	/* Extended (29-bit) or standard (11-bit) identifier */
	id = readl(&mb_regs->id);
	if (id & BXCAN_RIxR_IDE)
		cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG;
	else
		cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK;

	dlc = readl(&mb_regs->dlc);
	priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc);
	cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc));

	if (id & BXCAN_RIxR_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		int i, j;

		/* Copy the payload one 32-bit data register at a time */
		for (i = 0, j = 0; i < cf->len; i += 4, j++)
			*(u32 *)(cf->data + i) = readl(&mb_regs->data[j]);
	}

 mark_as_read:
	/* Release the FIFO output mailbox so the next frame advances */
	rf0r |= BXCAN_RF0R_RFOM0;
	writel(rf0r, &regs->rf0r);
	return skb;
}
412 
413 static irqreturn_t bxcan_rx_isr(int irq, void *dev_id)
414 {
415 	struct net_device *ndev = dev_id;
416 	struct bxcan_priv *priv = netdev_priv(ndev);
417 	struct bxcan_regs __iomem *regs = priv->regs;
418 	u32 rf0r;
419 
420 	rf0r = readl(&regs->rf0r);
421 	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
422 		return IRQ_NONE;
423 
424 	can_rx_offload_irq_offload_fifo(&priv->offload);
425 	can_rx_offload_irq_finish(&priv->offload);
426 
427 	return IRQ_HANDLED;
428 }
429 
/* TX completion interrupt: reap completed mailboxes in tail order,
 * account the echoed skbs, acknowledge the RQCP flags and wake the
 * queue if room became available.
 */
static irqreturn_t bxcan_tx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct net_device_stats *stats = &ndev->stats;
	u32 tsr, rqcp_bit;
	int idx;

	tsr = readl(&regs->tsr);
	if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2)))
		return IRQ_NONE;

	while (priv->tx_head - priv->tx_tail > 0) {
		idx = bxcan_get_tx_tail(priv);
		/* RQCPx flags sit at bits 0/8/16, hence idx * 8 */
		rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3);
		/* Stop at the first mailbox that has not completed yet */
		if (!(tsr & rqcp_bit))
			break;

		stats->tx_packets++;
		stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
		priv->tx_tail++;
	}

	/* Write the handled flags back to acknowledge them */
	writel(tsr, &regs->tsr);

	if (bxcan_get_tx_free(priv)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(ndev);
	}

	return IRQ_HANDLED;
}
466 
/* Translate the ESR error flags into a CAN state transition and queue
 * an error frame carrying the new counters.  Called from the status
 * change interrupt with the current ESR value.
 */
static void bxcan_handle_state_change(struct net_device *ndev, u32 esr)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	enum can_state new_state = priv->can.state;
	struct can_berr_counter bec;
	enum can_state rx_state, tx_state;
	struct sk_buff *skb;
	struct can_frame *cf;

	/* Early exit if no error flag is set */
	if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF)))
		return;

	bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
	bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);

	/* Check the most severe condition first */
	if (esr & BXCAN_ESR_BOFF)
		new_state = CAN_STATE_BUS_OFF;
	else if (esr & BXCAN_ESR_EPVF)
		new_state = CAN_STATE_ERROR_PASSIVE;
	else if (esr & BXCAN_ESR_EWGF)
		new_state = CAN_STATE_ERROR_WARNING;

	/* state hasn't changed */
	if (unlikely(new_state == priv->can.state))
		return;

	/* NOTE(review): on allocation failure the state change below is
	 * still reported, just without an error frame — relies on the
	 * callees accepting a NULL frame pointer; confirm against the
	 * can-dev helpers.
	 */
	skb = alloc_can_err_skb(ndev, &cf);

	/* Attribute the new state to the side with the higher counter */
	tx_state = bec.txerr >= bec.rxerr ? new_state : 0;
	rx_state = bec.txerr <= bec.rxerr ? new_state : 0;
	can_change_state(ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		can_bus_off(ndev);
	} else if (skb) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	if (skb) {
		int err;

		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
						     priv->timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}
}
517 
/* Report a bus error to the CAN stack based on the ESR last error code.
 * Only called when CAN_CTRLMODE_BERR_REPORTING is enabled.
 */
static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	enum bxcan_lec_code lec_code;
	struct can_frame *cf;
	struct sk_buff *skb;

	lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr);

	/* Early exit if no lec update or no error.
	 * No lec update means that no CAN bus event has been detected
	 * since CPU wrote BXCAN_LEC_UNUSED value to status reg.
	 */
	if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR)
		return;

	/* Common for all type of bus errors */
	priv->can.can_stats.bus_error++;

	/* Propagate the error condition to the CAN stack; statistics are
	 * still updated below even if the allocation fails.
	 */
	skb = alloc_can_err_skb(ndev, &cf);
	if (skb)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_code) {
	case BXCAN_LEC_STUFF_ERROR:
		netdev_dbg(ndev, "Stuff error\n");
		ndev->stats.rx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;

	case BXCAN_LEC_FORM_ERROR:
		netdev_dbg(ndev, "Form error\n");
		ndev->stats.rx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		break;

	case BXCAN_LEC_ACK_ERROR:
		netdev_dbg(ndev, "Ack error\n");
		ndev->stats.tx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		}
		break;

	case BXCAN_LEC_BIT1_ERROR:
		netdev_dbg(ndev, "Bit error (recessive)\n");
		ndev->stats.tx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;

	case BXCAN_LEC_BIT0_ERROR:
		netdev_dbg(ndev, "Bit error (dominant)\n");
		ndev->stats.tx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;

	case BXCAN_LEC_CRC_ERROR:
		netdev_dbg(ndev, "CRC error\n");
		ndev->stats.rx_errors++;
		if (skb) {
			cf->data[2] |= CAN_ERR_PROT_BIT;
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		}
		break;

	default:
		break;
	}

	if (skb) {
		int err;

		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
						     priv->timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}
}
602 
/* Status change / error (SCE) interrupt: dispatch state changes and,
 * if enabled, bus-error reporting, then acknowledge MSR.ERRI.
 */
static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	u32 msr, esr;

	msr = readl(&regs->msr);
	/* Not our interrupt if the error flag is not pending */
	if (!(msr & BXCAN_MSR_ERRI))
		return IRQ_NONE;

	esr = readl(&regs->esr);
	bxcan_handle_state_change(ndev, esr);

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		bxcan_handle_bus_err(ndev, esr);

	/* Write ERRI back to acknowledge the interrupt */
	msr |= BXCAN_MSR_ERRI;
	writel(msr, &regs->msr);
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}
626 
/* Bring the controller from reset to ERROR_ACTIVE: soft reset, leave
 * sleep, configure MCR/BTR and the acceptance filters in init mode,
 * then leave init mode and unmask the interrupts.  On any failure the
 * chip is soft-reset again before returning the error.
 */
static int bxcan_chip_start(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 clr, set;
	int err;

	err = bxcan_chip_softreset(priv);
	if (err) {
		netdev_err(ndev, "failed to reset chip, error %pe\n",
			   ERR_PTR(err));
		return err;
	}

	err = bxcan_leave_sleep_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to leave sleep mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_leave_sleep;
	}

	err = bxcan_enter_init_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to enter init mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_enter_init;
	}

	/* MCR
	 *
	 * select request order priority
	 * enable time triggered mode
	 * bus-off state left on sw request
	 * sleep mode left on sw request
	 * retransmit automatically on error
	 * do not lock RX FIFO on overrun
	 */
	bxcan_rmw(priv, &regs->mcr,
		  BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART |
		  BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP);

	/* Bit timing register settings (values are programmed minus one) */
	set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 +
			   bt->prop_seg - 1) |
		FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) |
		FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1);

	/* loopback + silent mode put the controller in test mode,
	 * useful for hot self-test
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		set |= BXCAN_BTR_LBKM;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		set |= BXCAN_BTR_SILM;

	bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM |
		  BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
		  BXCAN_BTR_SJW_MASK, set);

	bxcan_enable_filters(priv, priv->cfg);

	/* Clear all internal status */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	err = bxcan_leave_init_mode(priv);
	if (err) {
		netdev_err(ndev, "failed to leave init mode, error %pe\n",
			   ERR_PTR(err));
		goto failed_leave_init;
	}

	/* Set a `lec` value so that we can check for updates later */
	bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK,
		  FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED));

	/* IER
	 *
	 * Enable interrupt for:
	 * bus-off
	 * passive error
	 * warning error
	 * last error code
	 * RX FIFO pending message
	 * TX mailbox empty
	 */
	clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE |  BXCAN_IER_FOVIE1 |
		BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
		BXCAN_IER_FFIE0;
	set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE |
		BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE;

	/* Last-error-code interrupt only when bus errors are reported */
	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		set |= BXCAN_IER_LECIE;
	else
		clr |= BXCAN_IER_LECIE;

	bxcan_rmw(priv, &regs->ier, clr, set);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;

failed_leave_init:
failed_enter_init:
failed_leave_sleep:
	/* Best-effort: return the chip to a known (reset) state */
	bxcan_chip_softreset(priv);
	return err;
}
738 
/* ndo_open: enable the clock, open the candev, request the three
 * interrupts (rx0, tx, sce) and start the chip.  Resources are released
 * in reverse order through the goto chain on failure.
 */
static int bxcan_open(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err) {
		netdev_err(ndev, "failed to enable clock, error %pe\n",
			   ERR_PTR(err));
		return err;
	}

	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed, error %pe\n",
			   ERR_PTR(err));
		goto out_disable_clock;
	}

	/* Enable NAPI before the RX interrupt can fire */
	can_rx_offload_enable(&priv->offload);
	err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name,
			  ndev);
	if (err) {
		netdev_err(ndev, "failed to register rx irq(%d), error %pe\n",
			   ndev->irq, ERR_PTR(err));
		goto out_close_candev;
	}

	err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name,
			  ndev);
	if (err) {
		netdev_err(ndev, "failed to register tx irq(%d), error %pe\n",
			   priv->tx_irq, ERR_PTR(err));
		goto out_free_rx_irq;
	}

	err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED,
			  ndev->name, ndev);
	if (err) {
		netdev_err(ndev, "failed to register sce irq(%d), error %pe\n",
			   priv->sce_irq, ERR_PTR(err));
		goto out_free_tx_irq;
	}

	err = bxcan_chip_start(ndev);
	if (err)
		goto out_free_sce_irq;

	netif_start_queue(ndev);
	return 0;

out_free_sce_irq:
	free_irq(priv->sce_irq, ndev);
out_free_tx_irq:
	free_irq(priv->tx_irq, ndev);
out_free_rx_irq:
	free_irq(ndev->irq, ndev);
out_close_candev:
	can_rx_offload_disable(&priv->offload);
	close_candev(ndev);
out_disable_clock:
	clk_disable_unprepare(priv->clk);
	return err;
}
803 
/* Quiesce the controller: mask all interrupt sources, deactivate the
 * acceptance filter and put the chip into sleep mode.
 */
static void bxcan_chip_stop(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct bxcan_regs __iomem *regs = priv->regs;

	/* disable all interrupts */
	bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE |
		  BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE |
		  BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
		  BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
		  BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
	bxcan_disable_filters(priv, priv->cfg);
	/* NOTE(review): the return value of bxcan_enter_sleep_mode() is
	 * deliberately ignored here — stop cannot fail; confirm a timeout
	 * is harmless at this point.
	 */
	bxcan_enter_sleep_mode(priv);
	priv->can.state = CAN_STATE_STOPPED;
}
819 
/* ndo_stop: tear down in reverse order of bxcan_open() */
static int bxcan_stop(struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	bxcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	free_irq(priv->tx_irq, ndev);
	free_irq(priv->sce_irq, ndev);
	can_rx_offload_disable(&priv->offload);
	close_candev(ndev);
	clk_disable_unprepare(priv->clk);
	return 0;
}
834 
/* ndo_start_xmit: queue one frame into the next free TX mailbox.
 *
 * The mailbox index comes from the free-running head counter; payload
 * and DLC are written first, the echo skb is stored, and only then is
 * the identifier register written with TXRQ set to kick off the
 * transmission.  The queue is stopped preemptively when this fill uses
 * the last free mailbox.
 */
static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct bxcan_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct bxcan_regs __iomem *regs = priv->regs;
	struct bxcan_mb __iomem *mb_regs;
	unsigned int idx;
	u32 id;
	int i, j;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (bxcan_tx_busy(priv))
		return NETDEV_TX_BUSY;

	idx = bxcan_get_tx_head(priv);
	priv->tx_head++;
	/* Stop the queue now if this was the last free mailbox */
	if (bxcan_get_tx_free(priv) == 0)
		netif_stop_queue(ndev);

	mb_regs = &regs->tx_mb[idx];
	if (cf->can_id & CAN_EFF_FLAG)
		id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) |
			BXCAN_TIxR_IDE;
	else
		id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id);

	if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
		id |= BXCAN_TIxR_RTR;
	} else {
		/* Copy the payload one 32-bit data register at a time */
		for (i = 0, j = 0; i < cf->len; i += 4, j++)
			writel(*(u32 *)(cf->data + i), &mb_regs->data[j]);
	}

	writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc);

	/* Keep the skb for TX-complete echo before kicking the hardware */
	can_put_echo_skb(skb, ndev, idx, 0);

	/* Start transmission */
	writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id);

	return NETDEV_TX_OK;
}
880 
/* Netdevice callbacks for the CAN interface */
static const struct net_device_ops bxcan_netdev_ops = {
	.ndo_open = bxcan_open,
	.ndo_stop = bxcan_stop,
	.ndo_start_xmit = bxcan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
887 
/* Only the generic timestamping info is exposed via ethtool */
static const struct ethtool_ops bxcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
891 
892 static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
893 {
894 	int err;
895 
896 	switch (mode) {
897 	case CAN_MODE_START:
898 		err = bxcan_chip_start(ndev);
899 		if (err)
900 			return err;
901 
902 		netif_wake_queue(ndev);
903 		break;
904 
905 	default:
906 		return -EOPNOTSUPP;
907 	}
908 
909 	return 0;
910 }
911 
912 static int bxcan_get_berr_counter(const struct net_device *ndev,
913 				  struct can_berr_counter *bec)
914 {
915 	struct bxcan_priv *priv = netdev_priv(ndev);
916 	struct bxcan_regs __iomem *regs = priv->regs;
917 	u32 esr;
918 	int err;
919 
920 	err = clk_prepare_enable(priv->clk);
921 	if (err)
922 		return err;
923 
924 	esr = readl(&regs->esr);
925 	bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
926 	bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
927 	clk_disable_unprepare(priv->clk);
928 	return 0;
929 }
930 
/* Platform probe: map the register file, look up the shared filter
 * registers via the "st,gcan" syscon phandle, determine the instance
 * configuration from devicetree, fetch clock and interrupts, then
 * allocate and register the candev with FIFO rx-offload.
 */
static int bxcan_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct bxcan_priv *priv;
	struct clk *clk = NULL;
	void __iomem *regs;
	struct regmap *gcan;
	enum bxcan_cfg cfg;
	int err, rx_irq, tx_irq, sce_irq;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs)) {
		dev_err(dev, "failed to get base address\n");
		return PTR_ERR(regs);
	}

	/* The acceptance filter banks live in a region shared between
	 * the two bxCAN instances, modeled as a syscon regmap.
	 */
	gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan");
	if (IS_ERR(gcan)) {
		dev_err(dev, "failed to get shared memory base address\n");
		return PTR_ERR(gcan);
	}

	if (of_property_read_bool(np, "st,can-primary"))
		cfg = BXCAN_CFG_DUAL_PRIMARY;
	else if (of_property_read_bool(np, "st,can-secondary"))
		cfg = BXCAN_CFG_DUAL_SECONDARY;
	else
		cfg = BXCAN_CFG_SINGLE;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(clk);
	}

	rx_irq = platform_get_irq_byname(pdev, "rx0");
	if (rx_irq < 0)
		return rx_irq;

	tx_irq = platform_get_irq_byname(pdev, "tx");
	if (tx_irq < 0)
		return tx_irq;

	sce_irq = platform_get_irq_byname(pdev, "sce");
	if (sce_irq < 0)
		return sce_irq;

	ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM);
	if (!ndev) {
		dev_err(dev, "alloc_candev() failed\n");
		return -ENOMEM;
	}

	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	ndev->netdev_ops = &bxcan_netdev_ops;
	ndev->ethtool_ops = &bxcan_ethtool_ops;
	ndev->irq = rx_irq;	/* ndev->irq carries the RX FIFO 0 irq */
	ndev->flags |= IFF_ECHO;

	priv->dev = dev;
	priv->ndev = ndev;
	priv->regs = regs;
	priv->gcan = gcan;
	priv->clk = clk;
	priv->tx_irq = tx_irq;
	priv->sce_irq = sce_irq;
	priv->cfg = cfg;
	priv->can.clock.freq = clk_get_rate(clk);
	spin_lock_init(&priv->rmw_lock);
	priv->tx_head = 0;
	priv->tx_tail = 0;
	priv->can.bittiming_const = &bxcan_bittiming_const;
	priv->can.do_set_mode = bxcan_do_set_mode;
	priv->can.do_get_berr_counter = bxcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY	| CAN_CTRLMODE_BERR_REPORTING;

	priv->offload.mailbox_read = bxcan_mailbox_read;
	err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT);
	if (err) {
		dev_err(dev, "failed to add FIFO rx_offload\n");
		goto out_free_candev;
	}

	err = register_candev(ndev);
	if (err) {
		dev_err(dev, "failed to register netdev\n");
		goto out_can_rx_offload_del;
	}

	dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq,
		 tx_irq, rx_irq, sce_irq);
	return 0;

out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
out_free_candev:
	free_candev(ndev);
	return err;
}
1035 
/* Platform remove: unwind probe in reverse order */
static void bxcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bxcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	clk_disable_unprepare(priv->clk);
	can_rx_offload_del(&priv->offload);
	free_candev(ndev);
}
1046 
/* System suspend: detach the interface, put the controller to sleep
 * and gate its clock.  A no-op if the interface is down.
 */
static int __maybe_unused bxcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct bxcan_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	netif_stop_queue(ndev);
	netif_device_detach(ndev);

	/* NOTE(review): a bxcan_enter_sleep_mode() timeout is ignored
	 * here and suspend proceeds regardless — confirm this is safe
	 * when the controller fails to reach sleep mode.
	 */
	bxcan_enter_sleep_mode(priv);
	priv->can.state = CAN_STATE_SLEEPING;
	clk_disable_unprepare(priv->clk);
	return 0;
}
1063 
1064 static int __maybe_unused bxcan_resume(struct device *dev)
1065 {
1066 	struct net_device *ndev = dev_get_drvdata(dev);
1067 	struct bxcan_priv *priv = netdev_priv(ndev);
1068 
1069 	if (!netif_running(ndev))
1070 		return 0;
1071 
1072 	clk_prepare_enable(priv->clk);
1073 	bxcan_leave_sleep_mode(priv);
1074 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1075 
1076 	netif_device_attach(ndev);
1077 	netif_start_queue(ndev);
1078 	return 0;
1079 }
1080 
/* PM callbacks: park the controller in sleep mode across suspend */
static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume);

static const struct of_device_id bxcan_of_match[] = {
	{.compatible = "st,stm32f4-bxcan"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bxcan_of_match);

static struct platform_driver bxcan_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.pm = &bxcan_pm_ops,
		.of_match_table = bxcan_of_match,
	},
	.probe = bxcan_probe,
	.remove_new = bxcan_remove,
};

module_platform_driver(bxcan_driver);

MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver");
MODULE_LICENSE("GPL");
1104