1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3  *
4  * Copyright (C) 2012 - 2014 Xilinx, Inc.
5  * Copyright (C) 2009 PetaLogix. All rights reserved.
6  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7  *
8  * Description:
9  * This driver is developed for the AXI CAN IP and for the Zynq CANPS controller.
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/types.h>
27 #include <linux/can/dev.h>
28 #include <linux/can/error.h>
29 #include <linux/can/led.h>
30 #include <linux/pm_runtime.h>
31 
32 #define DRIVER_NAME	"xilinx_can"
33 
34 /* CAN registers set */
35 enum xcan_reg {
36 	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
37 	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
38 	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
39 	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
40 	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
41 	XCAN_ESR_OFFSET		= 0x14, /* Error status */
42 	XCAN_SR_OFFSET		= 0x18, /* Status */
43 	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
44 	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
45 	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
46 
47 	/* not on CAN FD cores */
48 	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
49 	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
50 	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */
51 
52 	/* only on CAN FD cores */
53 	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
54 	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
55 	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
56 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
57 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
58 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
59 };
60 
61 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
62 #define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
63 #define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
64 #define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
65 
66 #define XCAN_CANFD_FRAME_SIZE		0x48
67 #define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
68 					 XCAN_CANFD_FRAME_SIZE * (n))
69 #define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
70 					 XCAN_CANFD_FRAME_SIZE * (n))
71 #define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
72 					 XCAN_CANFD_FRAME_SIZE * (n))
73 
74 /* the single TX mailbox used by this driver on CAN FD HW */
75 #define XCAN_TX_MAILBOX_IDX		0
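
/* Editorial annotation (not part of the original source): a worked
 * example of the message buffer addressing above.  Each CAN FD message
 * buffer is XCAN_CANFD_FRAME_SIZE (0x48) bytes, so:
 *
 *   XCAN_TXMSG_FRAME_OFFSET(0) = 0x0100 + 0x48 * 0 = 0x0100
 *   XCAN_TXMSG_FRAME_OFFSET(1) = 0x0100 + 0x48 * 1 = 0x0148
 *   XCAN_RXMSG_FRAME_OFFSET(5) = 0x1100 + 0x48 * 5 = 0x1268
 *
 * This driver only uses TX buffer XCAN_TX_MAILBOX_IDX (0) on CAN FD
 * hardware, so the TX frame is always written at offset 0x0100.
 */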
76 
77 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
78 #define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
79 #define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
80 #define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
81 #define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
82 #define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
83 #define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
84 #define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
85 #define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
86 #define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
87 #define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
88 #define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
89 #define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
90 #define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
91 #define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
92 #define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
93 #define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
94 #define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
95 #define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
96 #define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
97 #define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
98 #define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
99 #define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
100 #define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
101 #define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
102 #define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
103 #define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
104 #define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
105 #define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
106 #define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
107 #define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
108 #define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
109 #define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
110 #define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
111 #define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
112 #define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
113 #define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
114 #define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
115 #define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
116 #define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
117 #define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
118 #define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
119 #define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
120 #define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
121 #define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
122 #define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
123 
124 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
125 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
126 #define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
127 #define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
128 #define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
129 #define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
130 #define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
131 #define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
132 #define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */
133 
134 /* CAN frame length constants */
135 #define XCAN_FRAME_MAX_DATA_LEN		8
136 #define XCAN_TIMEOUT			(1 * HZ)
137 
138 /* TX-FIFO-empty interrupt available */
139 #define XCAN_FLAG_TXFEMP	0x0001
140 /* RX Match Not Finished interrupt available */
141 #define XCAN_FLAG_RXMNF		0x0002
142 /* Extended acceptance filters with control at 0xE0 */
143 #define XCAN_FLAG_EXT_FILTERS	0x0004
144 /* TX mailboxes instead of TX FIFO */
145 #define XCAN_FLAG_TX_MAILBOXES	0x0008
146 /* RX FIFO with each buffer in separate registers at 0x1100
147  * instead of the regular FIFO at 0x50
148  */
149 #define XCAN_FLAG_RX_FIFO_MULTI	0x0010
150 #define XCAN_FLAG_CANFD_2	0x0020
151 
152 struct xcan_devtype_data {
153 	unsigned int flags;
154 	const struct can_bittiming_const *bittiming_const;
155 	const char *bus_clk_name;
156 	unsigned int btr_ts2_shift;
157 	unsigned int btr_sjw_shift;
158 };
159 
160 /**
161  * struct xcan_priv - This definition defines the CAN driver instance
162  * @can:			CAN private data structure.
163  * @tx_lock:			Lock for synchronizing TX interrupt handling
164  * @tx_head:			Tx CAN packets ready to send on the queue
165  * @tx_tail:			Tx CAN packets successfully sent on the queue
166  * @tx_max:			Maximum number of packets the driver can send
167  * @napi:			NAPI structure
168  * @read_reg:			For reading data from CAN registers
169  * @write_reg:			For writing data to CAN registers
170  * @dev:			Network device data structure
171  * @reg_base:			Ioremapped address to registers
172  * @irq_flags:			For request_irq()
173  * @bus_clk:			Pointer to struct clk
174  * @can_clk:			Pointer to struct clk
175  * @devtype:			Device type specific constants
176  */
177 struct xcan_priv {
178 	struct can_priv can;
179 	spinlock_t tx_lock;
180 	unsigned int tx_head;
181 	unsigned int tx_tail;
182 	unsigned int tx_max;
183 	struct napi_struct napi;
184 	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
185 	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
186 			u32 val);
187 	struct device *dev;
188 	void __iomem *reg_base;
189 	unsigned long irq_flags;
190 	struct clk *bus_clk;
191 	struct clk *can_clk;
192 	struct xcan_devtype_data devtype;
193 };
194 
195 /* CAN Bittiming constants as per Xilinx CAN specs */
196 static const struct can_bittiming_const xcan_bittiming_const = {
197 	.name = DRIVER_NAME,
198 	.tseg1_min = 1,
199 	.tseg1_max = 16,
200 	.tseg2_min = 1,
201 	.tseg2_max = 8,
202 	.sjw_max = 4,
203 	.brp_min = 1,
204 	.brp_max = 256,
205 	.brp_inc = 1,
206 };
207 
208 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
209 	.name = DRIVER_NAME,
210 	.tseg1_min = 1,
211 	.tseg1_max = 64,
212 	.tseg2_min = 1,
213 	.tseg2_max = 16,
214 	.sjw_max = 16,
215 	.brp_min = 1,
216 	.brp_max = 256,
217 	.brp_inc = 1,
218 };
219 
220 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
221 	.name = DRIVER_NAME,
222 	.tseg1_min = 1,
223 	.tseg1_max = 256,
224 	.tseg2_min = 1,
225 	.tseg2_max = 128,
226 	.sjw_max = 128,
227 	.brp_min = 1,
228 	.brp_max = 256,
229 	.brp_inc = 1,
230 };
231 
232 /**
233  * xcan_write_reg_le - Write a value to the device register little endian
234  * @priv:	Driver private data structure
235  * @reg:	Register offset
236  * @val:	Value to write at the Register offset
237  *
238  * Write data to the particular CAN register
239  */
240 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
241 			u32 val)
242 {
243 	iowrite32(val, priv->reg_base + reg);
244 }
245 
246 /**
247  * xcan_read_reg_le - Read a value from the device register little endian
248  * @priv:	Driver private data structure
249  * @reg:	Register offset
250  *
251  * Read data from the particular CAN register
252  * Return: value read from the CAN register
253  */
254 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
255 {
256 	return ioread32(priv->reg_base + reg);
257 }
258 
259 /**
260  * xcan_write_reg_be - Write a value to the device register big endian
261  * @priv:	Driver private data structure
262  * @reg:	Register offset
263  * @val:	Value to write at the Register offset
264  *
265  * Write data to the particular CAN register
266  */
267 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
268 			u32 val)
269 {
270 	iowrite32be(val, priv->reg_base + reg);
271 }
272 
273 /**
274  * xcan_read_reg_be - Read a value from the device register big endian
275  * @priv:	Driver private data structure
276  * @reg:	Register offset
277  *
278  * Read data from the particular CAN register
279  * Return: value read from the CAN register
280  */
281 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
282 {
283 	return ioread32be(priv->reg_base + reg);
284 }
285 
286 /**
287  * xcan_rx_int_mask - Get the mask for the receive interrupt
288  * @priv:	Driver private data structure
289  *
290  * Return: The receive interrupt mask used by the driver on this HW
291  */
292 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
293 {
294 	/* RXNEMP is better suited for our use case as it cannot be cleared
295 	 * while the FIFO is non-empty, but CAN FD HW does not have it
296 	 */
297 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
298 		return XCAN_IXR_RXOK_MASK;
299 	else
300 		return XCAN_IXR_RXNEMP_MASK;
301 }
302 
303 /**
304  * set_reset_mode - Resets the CAN device mode
305  * @ndev:	Pointer to net_device structure
306  *
307  * This is the driver reset mode routine. The driver
308  * enters configuration mode.
309  *
310  * Return: 0 on success and failure value on error
311  */
312 static int set_reset_mode(struct net_device *ndev)
313 {
314 	struct xcan_priv *priv = netdev_priv(ndev);
315 	unsigned long timeout;
316 
317 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
318 
319 	timeout = jiffies + XCAN_TIMEOUT;
320 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
321 		if (time_after(jiffies, timeout)) {
322 			netdev_warn(ndev, "timed out for config mode\n");
323 			return -ETIMEDOUT;
324 		}
325 		usleep_range(500, 10000);
326 	}
327 
328 	/* reset clears FIFOs */
329 	priv->tx_head = 0;
330 	priv->tx_tail = 0;
331 
332 	return 0;
333 }
334 
335 /**
336  * xcan_set_bittiming - CAN set bit timing routine
337  * @ndev:	Pointer to net_device structure
338  *
339  * This is the driver set bittiming routine.
340  * Return: 0 on success and failure value on error
341  */
342 static int xcan_set_bittiming(struct net_device *ndev)
343 {
344 	struct xcan_priv *priv = netdev_priv(ndev);
345 	struct can_bittiming *bt = &priv->can.bittiming;
346 	u32 btr0, btr1;
347 	u32 is_config_mode;
348 
349 	/* Check whether Xilinx CAN is in configuration mode.
350 	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
351 	 */
352 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
353 				XCAN_SR_CONFIG_MASK;
354 	if (!is_config_mode) {
355 		netdev_alert(ndev,
356 		     "BUG! Cannot set bittiming - CAN is not in config mode\n");
357 		return -EPERM;
358 	}
359 
360 	/* Setting Baud Rate prescaler value in BRPR Register */
361 	btr0 = (bt->brp - 1);
362 
363 	/* Setting Time Segment 1 in BTR Register */
364 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
365 
366 	/* Setting Time Segment 2 in BTR Register */
367 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
368 
369 	/* Setting Synchronous jump width in BTR Register */
370 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
371 
372 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
373 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
374 
375 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
376 			priv->read_reg(priv, XCAN_BRPR_OFFSET),
377 			priv->read_reg(priv, XCAN_BTR_OFFSET));
378 
379 	return 0;
380 }
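
/* Editorial annotation (not part of the original source): a hypothetical
 * worked example of the register packing done in xcan_set_bittiming().
 * Assume a 24 MHz can_clk and that the CAN core framework has chosen
 * brp = 3, prop_seg + phase_seg1 = 13, phase_seg2 = 2, sjw = 1 for a
 * 500 kbit/s nominal bit rate (16 time quanta per bit, 87.5% sample
 * point).  With the Zynq shifts (TS2 at bit 4, SJW at bit 7):
 *
 *   BRPR = 3 - 1                                  = 0x02
 *   BTR  = (13 - 1) | (2 - 1) << 4 | (1 - 1) << 7 = 0x1C
 *
 * The CAN FD cores pack the same fields at the _CANFD shifts instead
 * (TS2 at bit 8, SJW at bit 16).
 */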
381 
382 /**
383  * xcan_chip_start - This is the driver's start routine
384  * @ndev:	Pointer to net_device structure
385  *
386  * This is the driver's start routine.
387  * Based on the state of the CAN device it puts
388  * the CAN device into the proper mode.
389  *
390  * Return: 0 on success and failure value on error
391  */
392 static int xcan_chip_start(struct net_device *ndev)
393 {
394 	struct xcan_priv *priv = netdev_priv(ndev);
395 	u32 reg_msr, reg_sr_mask;
396 	int err;
397 	unsigned long timeout;
398 	u32 ier;
399 
400 	/* Check if it is in reset mode */
401 	err = set_reset_mode(ndev);
402 	if (err < 0)
403 		return err;
404 
405 	err = xcan_set_bittiming(ndev);
406 	if (err < 0)
407 		return err;
408 
409 	/* Enable interrupts */
410 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
411 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
412 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
413 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
414 
415 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
416 		ier |= XCAN_IXR_RXMNF_MASK;
417 
418 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
419 
420 	/* Check whether it is loopback mode or normal mode  */
421 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
422 		reg_msr = XCAN_MSR_LBACK_MASK;
423 		reg_sr_mask = XCAN_SR_LBACK_MASK;
424 	} else {
425 		reg_msr = 0x0;
426 		reg_sr_mask = XCAN_SR_NORMAL_MASK;
427 	}
428 
429 	/* enable the first extended filter, if any, as cores with extended
430 	 * filtering default to non-receipt if all filters are disabled
431 	 */
432 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
433 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
434 
435 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
436 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
437 
438 	timeout = jiffies + XCAN_TIMEOUT;
439 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
440 		if (time_after(jiffies, timeout)) {
441 			netdev_warn(ndev,
442 				"timed out for correct mode\n");
443 			return -ETIMEDOUT;
444 		}
445 	}
446 	netdev_dbg(ndev, "status:0x%08x\n",
447 			priv->read_reg(priv, XCAN_SR_OFFSET));
448 
449 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
450 	return 0;
451 }
452 
453 /**
454  * xcan_do_set_mode - This sets the mode of the driver
455  * @ndev:	Pointer to net_device structure
456  * @mode:	Tells the mode of the driver
457  *
458  * This checks the driver's state and calls the
459  * corresponding mode-setting routine.
460  *
461  * Return: 0 on success and failure value on error
462  */
463 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
464 {
465 	int ret;
466 
467 	switch (mode) {
468 	case CAN_MODE_START:
469 		ret = xcan_chip_start(ndev);
470 		if (ret < 0) {
471 			netdev_err(ndev, "xcan_chip_start failed!\n");
472 			return ret;
473 		}
474 		netif_wake_queue(ndev);
475 		break;
476 	default:
477 		ret = -EOPNOTSUPP;
478 		break;
479 	}
480 
481 	return ret;
482 }
483 
484 /**
485  * xcan_write_frame - Write a frame to HW
486  * @skb:		sk_buff pointer that contains data to be Txed
487  * @frame_offset:	Register offset to write the frame to
488  */
489 static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
490 			     int frame_offset)
491 {
492 	u32 id, dlc, data[2] = {0, 0};
493 	struct can_frame *cf = (struct can_frame *)skb->data;
494 
495 	/* Watch the bit sequence carefully */
496 	if (cf->can_id & CAN_EFF_FLAG) {
497 		/* Extended CAN ID format */
498 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
499 			XCAN_IDR_ID2_MASK;
500 		id |= (((cf->can_id & CAN_EFF_MASK) >>
501 			(CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
502 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
503 
504 		/* The substitute remote TX request bit should be "1"
505 		 * for extended frames as in the Xilinx CAN datasheet
506 		 */
507 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
508 
509 		if (cf->can_id & CAN_RTR_FLAG)
510 			/* Extended frames remote TX request */
511 			id |= XCAN_IDR_RTR_MASK;
512 	} else {
513 		/* Standard CAN ID format */
514 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
515 			XCAN_IDR_ID1_MASK;
516 
517 		if (cf->can_id & CAN_RTR_FLAG)
518 			/* Standard frames remote TX request */
519 			id |= XCAN_IDR_SRR_MASK;
520 	}
521 
522 	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
523 
524 	if (cf->can_dlc > 0)
525 		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
526 	if (cf->can_dlc > 4)
527 		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
528 
529 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
530 	/* If the CAN frame is RTR frame this write triggers transmission
531 	 * (not on CAN FD)
532 	 */
533 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
534 	if (!(cf->can_id & CAN_RTR_FLAG)) {
535 		priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
536 				data[0]);
537 		/* If the CAN frame is Standard/Extended frame this
538 		 * write triggers transmission (not on CAN FD)
539 		 */
540 		priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
541 				data[1]);
542 	}
543 }
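
/* Editorial annotation (not part of the original source): examples of
 * the IDR encoding performed in xcan_write_frame():
 *
 *   standard frame, can_id = 0x123:
 *     id = (0x123 << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK
 *        = 0x24600000
 *
 *   extended frame, can_id = 0x1 | CAN_EFF_FLAG:
 *     ID2 (low 18 ID bits)  = (0x1 << XCAN_IDR_ID2_SHIFT) = 0x00000002
 *     ID1 (high 11 ID bits) = (0x0 << XCAN_IDR_ID1_SHIFT) = 0x00000000
 *     IDE | SRR                                           = 0x00180000
 *     id                                                  = 0x00180002
 *
 * RTR frames additionally set XCAN_IDR_RTR_MASK (extended) or
 * XCAN_IDR_SRR_MASK (standard), as in the code above.
 */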
544 
545 /**
546  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
547  *
548  * Return: 0 on success, -ENOSPC if FIFO is full.
549  */
550 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
551 {
552 	struct xcan_priv *priv = netdev_priv(ndev);
553 	unsigned long flags;
554 
555 	/* Check if the TX buffer is full */
556 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
557 			XCAN_SR_TXFLL_MASK))
558 		return -ENOSPC;
559 
560 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
561 
562 	spin_lock_irqsave(&priv->tx_lock, flags);
563 
564 	priv->tx_head++;
565 
566 	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
567 
568 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
569 	if (priv->tx_max > 1)
570 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
571 
572 	/* Check if the TX buffer is full */
573 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
574 		netif_stop_queue(ndev);
575 
576 	spin_unlock_irqrestore(&priv->tx_lock, flags);
577 
578 	return 0;
579 }
580 
581 /**
582  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
583  *
584  * Return: 0 on success, -ENOSPC if there is no space
585  */
586 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
587 {
588 	struct xcan_priv *priv = netdev_priv(ndev);
589 	unsigned long flags;
590 
591 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
592 		     BIT(XCAN_TX_MAILBOX_IDX)))
593 		return -ENOSPC;
594 
595 	can_put_echo_skb(skb, ndev, 0);
596 
597 	spin_lock_irqsave(&priv->tx_lock, flags);
598 
599 	priv->tx_head++;
600 
601 	xcan_write_frame(priv, skb,
602 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
603 
604 	/* Mark buffer as ready for transmit */
605 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
606 
607 	netif_stop_queue(ndev);
608 
609 	spin_unlock_irqrestore(&priv->tx_lock, flags);
610 
611 	return 0;
612 }
613 
614 /**
615  * xcan_start_xmit - Starts the transmission
616  * @skb:	sk_buff pointer that contains data to be Txed
617  * @ndev:	Pointer to net_device structure
618  *
619  * This function is invoked from upper layers to initiate transmission.
620  *
621  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
622  */
623 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
624 {
625 	struct xcan_priv *priv = netdev_priv(ndev);
626 	int ret;
627 
628 	if (can_dropped_invalid_skb(ndev, skb))
629 		return NETDEV_TX_OK;
630 
631 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
632 		ret = xcan_start_xmit_mailbox(skb, ndev);
633 	else
634 		ret = xcan_start_xmit_fifo(skb, ndev);
635 
636 	if (ret < 0) {
637 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
638 		netif_stop_queue(ndev);
639 		return NETDEV_TX_BUSY;
640 	}
641 
642 	return NETDEV_TX_OK;
643 }
644 
645 /**
646  * xcan_rx - Is called from CAN ISR to complete the received
647  *		frame processing
648  * @ndev:	Pointer to net_device structure
649  * @frame_base:	Register offset to the frame to be read
650  *
651  * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
652  * does minimal processing and invokes "netif_receive_skb" to complete further
653  * processing.
654  * Return: 1 on success and 0 on failure.
655  */
656 static int xcan_rx(struct net_device *ndev, int frame_base)
657 {
658 	struct xcan_priv *priv = netdev_priv(ndev);
659 	struct net_device_stats *stats = &ndev->stats;
660 	struct can_frame *cf;
661 	struct sk_buff *skb;
662 	u32 id_xcan, dlc, data[2] = {0, 0};
663 
664 	skb = alloc_can_skb(ndev, &cf);
665 	if (unlikely(!skb)) {
666 		stats->rx_dropped++;
667 		return 0;
668 	}
669 
670 	/* Read a frame from Xilinx zynq CANPS */
671 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
672 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
673 				   XCAN_DLCR_DLC_SHIFT;
674 
675 	/* Change Xilinx CAN data length format to socketCAN data format */
676 	cf->can_dlc = get_can_dlc(dlc);
677 
678 	/* Change Xilinx CAN ID format to socketCAN ID format */
679 	if (id_xcan & XCAN_IDR_IDE_MASK) {
680 		/* The received frame is an Extended format frame */
681 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
682 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
683 				XCAN_IDR_ID2_SHIFT;
684 		cf->can_id |= CAN_EFF_FLAG;
685 		if (id_xcan & XCAN_IDR_RTR_MASK)
686 			cf->can_id |= CAN_RTR_FLAG;
687 	} else {
688 		/* The received frame is a standard format frame */
689 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
690 				XCAN_IDR_ID1_SHIFT;
691 		if (id_xcan & XCAN_IDR_SRR_MASK)
692 			cf->can_id |= CAN_RTR_FLAG;
693 	}
694 
695 	/* DW1/DW2 must always be read to remove message from RXFIFO */
696 	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
697 	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
698 
699 	if (!(cf->can_id & CAN_RTR_FLAG)) {
700 		/* Change Xilinx CAN data format to socketCAN data format */
701 		if (cf->can_dlc > 0)
702 			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
703 		if (cf->can_dlc > 4)
704 			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
705 	}
706 
707 	stats->rx_bytes += cf->can_dlc;
708 	stats->rx_packets++;
709 	netif_receive_skb(skb);
710 
711 	return 1;
712 }
713 
714 /**
715  * xcan_current_error_state - Get current error state from HW
716  * @ndev:	Pointer to net_device structure
717  *
718  * Checks the current CAN error state from the HW. Note that this
719  * only checks for ERROR_PASSIVE and ERROR_WARNING.
720  *
721  * Return:
722  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
723  * otherwise.
724  */
725 static enum can_state xcan_current_error_state(struct net_device *ndev)
726 {
727 	struct xcan_priv *priv = netdev_priv(ndev);
728 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
729 
730 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
731 		return CAN_STATE_ERROR_PASSIVE;
732 	else if (status & XCAN_SR_ERRWRN_MASK)
733 		return CAN_STATE_ERROR_WARNING;
734 	else
735 		return CAN_STATE_ERROR_ACTIVE;
736 }
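
/* Editorial annotation (not part of the original source): example SR
 * values and the state xcan_current_error_state() maps them to
 * (ESTAT is the two-bit field at mask 0x180, ERRWRN is bit 0x40):
 *
 *   SR = 0x00000188 (ESTAT == 0b11)              -> ERROR_PASSIVE
 *   SR = 0x00000048 (ESTAT != 0b11, ERRWRN set)  -> ERROR_WARNING
 *   SR = 0x00000008 (neither)                    -> ERROR_ACTIVE
 */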
737 
738 /**
739  * xcan_set_error_state - Set new CAN error state
740  * @ndev:	Pointer to net_device structure
741  * @new_state:	The new CAN state to be set
742  * @cf:		Error frame to be populated or NULL
743  *
744  * Set new CAN error state for the device, updating statistics and
745  * populating the error frame if given.
746  */
747 static void xcan_set_error_state(struct net_device *ndev,
748 				 enum can_state new_state,
749 				 struct can_frame *cf)
750 {
751 	struct xcan_priv *priv = netdev_priv(ndev);
752 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
753 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
754 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
755 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
756 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
757 
758 	/* non-ERROR states are handled elsewhere */
759 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
760 		return;
761 
762 	can_change_state(ndev, cf, tx_state, rx_state);
763 
764 	if (cf) {
765 		cf->data[6] = txerr;
766 		cf->data[7] = rxerr;
767 	}
768 }
769 
770 /**
771  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
772  * @ndev:	Pointer to net_device structure
773  *
774  * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
775  * the performed RX/TX has caused it to drop to a lesser state and set
776  * the interface state accordingly.
777  */
778 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
779 {
780 	struct xcan_priv *priv = netdev_priv(ndev);
781 	enum can_state old_state = priv->can.state;
782 	enum can_state new_state;
783 
784 	/* changing error state due to successful frame RX/TX can only
785 	 * occur from these states
786 	 */
787 	if (old_state != CAN_STATE_ERROR_WARNING &&
788 	    old_state != CAN_STATE_ERROR_PASSIVE)
789 		return;
790 
791 	new_state = xcan_current_error_state(ndev);
792 
793 	if (new_state != old_state) {
794 		struct sk_buff *skb;
795 		struct can_frame *cf;
796 
797 		skb = alloc_can_err_skb(ndev, &cf);
798 
799 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
800 
801 		if (skb) {
802 			struct net_device_stats *stats = &ndev->stats;
803 
804 			stats->rx_packets++;
805 			stats->rx_bytes += cf->can_dlc;
806 			netif_rx(skb);
807 		}
808 	}
809 }
810 
811 /**
812  * xcan_err_interrupt - error frame Isr
813  * @ndev:	net_device pointer
814  * @isr:	interrupt status register value
815  *
816  * This is the CAN error interrupt and it will
817  * check the type of error and forward the error
818  * frame to upper layers.
819  */
820 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
821 {
822 	struct xcan_priv *priv = netdev_priv(ndev);
823 	struct net_device_stats *stats = &ndev->stats;
824 	struct can_frame *cf;
825 	struct sk_buff *skb;
826 	u32 err_status;
827 
828 	skb = alloc_can_err_skb(ndev, &cf);
829 
830 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
831 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
832 
833 	if (isr & XCAN_IXR_BSOFF_MASK) {
834 		priv->can.state = CAN_STATE_BUS_OFF;
835 		priv->can.can_stats.bus_off++;
836 		/* Leave device in Config Mode in bus-off state */
837 		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
838 		can_bus_off(ndev);
839 		if (skb)
840 			cf->can_id |= CAN_ERR_BUSOFF;
841 	} else {
842 		enum can_state new_state = xcan_current_error_state(ndev);
843 
844 		if (new_state != priv->can.state)
845 			xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
846 	}
847 
848 	/* Check for Arbitration lost interrupt */
849 	if (isr & XCAN_IXR_ARBLST_MASK) {
850 		priv->can.can_stats.arbitration_lost++;
851 		if (skb) {
852 			cf->can_id |= CAN_ERR_LOSTARB;
853 			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
854 		}
855 	}
856 
857 	/* Check for RX FIFO Overflow interrupt */
858 	if (isr & XCAN_IXR_RXOFLW_MASK) {
859 		stats->rx_over_errors++;
860 		stats->rx_errors++;
861 		if (skb) {
862 			cf->can_id |= CAN_ERR_CRTL;
863 			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
864 		}
865 	}
866 
867 	/* Check for RX Match Not Finished interrupt */
868 	if (isr & XCAN_IXR_RXMNF_MASK) {
869 		stats->rx_dropped++;
870 		stats->rx_errors++;
871 		netdev_err(ndev, "RX match not finished, frame discarded\n");
872 		if (skb) {
873 			cf->can_id |= CAN_ERR_CRTL;
874 			cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
875 		}
876 	}
877 
878 	/* Check for error interrupt */
879 	if (isr & XCAN_IXR_ERROR_MASK) {
880 		if (skb)
881 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
882 
883 		/* Check for Ack error interrupt */
884 		if (err_status & XCAN_ESR_ACKER_MASK) {
885 			stats->tx_errors++;
886 			if (skb) {
887 				cf->can_id |= CAN_ERR_ACK;
888 				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
889 			}
890 		}
891 
892 		/* Check for Bit error interrupt */
893 		if (err_status & XCAN_ESR_BERR_MASK) {
894 			stats->tx_errors++;
895 			if (skb) {
896 				cf->can_id |= CAN_ERR_PROT;
897 				cf->data[2] = CAN_ERR_PROT_BIT;
898 			}
899 		}
900 
901 		/* Check for Stuff error interrupt */
902 		if (err_status & XCAN_ESR_STER_MASK) {
903 			stats->rx_errors++;
904 			if (skb) {
905 				cf->can_id |= CAN_ERR_PROT;
906 				cf->data[2] = CAN_ERR_PROT_STUFF;
907 			}
908 		}
909 
910 		/* Check for Form error interrupt */
911 		if (err_status & XCAN_ESR_FMER_MASK) {
912 			stats->rx_errors++;
913 			if (skb) {
914 				cf->can_id |= CAN_ERR_PROT;
915 				cf->data[2] = CAN_ERR_PROT_FORM;
916 			}
917 		}
918 
919 		/* Check for CRC error interrupt */
920 		if (err_status & XCAN_ESR_CRCER_MASK) {
921 			stats->rx_errors++;
922 			if (skb) {
923 				cf->can_id |= CAN_ERR_PROT;
924 				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
925 			}
926 		}
927 		priv->can.can_stats.bus_error++;
928 	}
929 
930 	if (skb) {
931 		stats->rx_packets++;
932 		stats->rx_bytes += cf->can_dlc;
933 		netif_rx(skb);
934 	}
935 
936 	netdev_dbg(ndev, "%s: error status register:0x%x\n",
937 			__func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
938 }
939 
940 /**
941  * xcan_state_interrupt - It will check the state of the CAN device
942  * @ndev:	net_device pointer
943  * @isr:	interrupt status register value
944  *
945  * This checks the state of the CAN device
946  * and puts the device into the appropriate state.
947  */
948 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
949 {
950 	struct xcan_priv *priv = netdev_priv(ndev);
951 
952 	/* Check for Sleep interrupt if set put CAN device in sleep state */
953 	if (isr & XCAN_IXR_SLP_MASK)
954 		priv->can.state = CAN_STATE_SLEEPING;
955 
956 	/* Check for Wake up interrupt if set put CAN device in Active state */
957 	if (isr & XCAN_IXR_WKUP_MASK)
958 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
959 }
960 
961 /**
962  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
963  *
964  * Return: Register offset of the next frame in RX FIFO.
965  */
966 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
967 {
968 	int offset;
969 
970 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
971 		u32 fsr;
972 
973 		/* clear RXOK before the is-empty check so that any newly
974 		 * received frame will reassert it without a race
975 		 */
976 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
977 
978 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
979 
980 		/* check if RX FIFO is empty */
981 		if (!(fsr & XCAN_FSR_FL_MASK))
982 			return -ENOENT;
983 
984 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
985 			offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
986 		else
987 			offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
988 
989 	} else {
990 		/* check if RX FIFO is empty */
991 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
992 		      XCAN_IXR_RXNEMP_MASK))
993 			return -ENOENT;
994 
995 		/* frames are read from a static offset */
996 		offset = XCAN_RXFIFO_OFFSET;
997 	}
998 
999 	return offset;
1000 }
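
/* Editorial annotation (not part of the original source): a worked
 * example of the FSR decoding in xcan_rx_fifo_get_next_frame() on
 * CAN FD hardware:
 *
 *   FSR = 0x00000305  ->  fill level (FL) = 3, read index (RI) = 5
 *
 * so the next frame is read from XCAN_RXMSG_FRAME_OFFSET(5)
 * (0x1100 + 5 * 0x48 = 0x1268), or XCAN_RXMSG_2_FRAME_OFFSET(5)
 * (0x2100 + 5 * 0x48 = 0x2268) on CAN FD 2.0 cores.  A zero fill
 * level makes the function return -ENOENT instead.
 */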
1001 
1002 /**
1003  * xcan_rx_poll - Poll routine for rx packets (NAPI)
1004  * @napi:	napi structure pointer
1005  * @quota:	Max number of rx packets to be processed.
1006  *
1007  * This is the poll routine for the rx part.
1008  * It will process packets up to the maximum quota value.
1009  *
1010  * Return: number of packets received
1011  */
1012 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1013 {
1014 	struct net_device *ndev = napi->dev;
1015 	struct xcan_priv *priv = netdev_priv(ndev);
1016 	u32 ier;
1017 	int work_done = 0;
1018 	int frame_offset;
1019 
1020 	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1021 	       (work_done < quota)) {
1022 		work_done += xcan_rx(ndev, frame_offset);
1023 
1024 		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1025 			/* increment read index */
1026 			priv->write_reg(priv, XCAN_FSR_OFFSET,
1027 					XCAN_FSR_IRI_MASK);
1028 		else
1029 			/* clear rx-not-empty (will actually clear only if
1030 			 * empty)
1031 			 */
1032 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1033 					XCAN_IXR_RXNEMP_MASK);
1034 	}
1035 
1036 	if (work_done) {
1037 		can_led_event(ndev, CAN_LED_EVENT_RX);
1038 		xcan_update_error_state_after_rxtx(ndev);
1039 	}
1040 
1041 	if (work_done < quota) {
1042 		napi_complete_done(napi, work_done);
1043 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1044 		ier |= xcan_rx_int_mask(priv);
1045 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1046 	}
1047 	return work_done;
1048 }
1049 
1050 /**
1051  * xcan_tx_interrupt - Tx Done Isr
1052  * @ndev:	net_device pointer
1053  * @isr:	Interrupt status register value
1054  */
1055 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1056 {
1057 	struct xcan_priv *priv = netdev_priv(ndev);
1058 	struct net_device_stats *stats = &ndev->stats;
1059 	unsigned int frames_in_fifo;
1060 	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1061 	unsigned long flags;
1062 	int retries = 0;
1063 
1064 	/* Synchronize with xmit as we need to know the exact number
1065 	 * of frames in the FIFO to stay in sync due to the TXFEMP
1066 	 * handling.
1067 	 * This also prevents a race between netif_wake_queue() and
1068 	 * netif_stop_queue().
1069 	 */
1070 	spin_lock_irqsave(&priv->tx_lock, flags);
1071 
1072 	frames_in_fifo = priv->tx_head - priv->tx_tail;
1073 
1074 	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1075 		/* clear TXOK anyway to avoid getting back here */
1076 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1077 		spin_unlock_irqrestore(&priv->tx_lock, flags);
1078 		return;
1079 	}
1080 
1081 	/* Check if 2 frames were sent (TXOK only means that at least 1
1082 	 * frame was sent).
1083 	 */
1084 	if (frames_in_fifo > 1) {
1085 		WARN_ON(frames_in_fifo > priv->tx_max);
1086 
1087 		/* Synchronize TXOK and isr so that after the loop:
1088 		 * (1) isr variable is up-to-date at least up to TXOK clear
1089 		 *     time. This avoids us clearing a TXOK of a second frame
1090 		 *     but not noticing that the FIFO is now empty and thus
1091 		 *     marking only a single frame as sent.
1092 		 * (2) No TXOK is left. Having one could mean leaving a
1093 		 *     stray TXOK as we might process the associated frame
1094 		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
1095 		 *     clear to satisfy (1).
1096 		 */
1097 		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
1098 			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1099 			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1100 		}
1101 
1102 		if (isr & XCAN_IXR_TXFEMP_MASK) {
1103 			/* nothing in FIFO anymore */
1104 			frames_sent = frames_in_fifo;
1105 		}
1106 	} else {
1107 		/* single frame in fifo, just clear TXOK */
1108 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1109 	}
1110 
1111 	while (frames_sent--) {
1112 		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1113 						    priv->tx_max);
1114 		priv->tx_tail++;
1115 		stats->tx_packets++;
1116 	}
1117 
1118 	netif_wake_queue(ndev);
1119 
1120 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1121 
1122 	can_led_event(ndev, CAN_LED_EVENT_TX);
1123 	xcan_update_error_state_after_rxtx(ndev);
1124 }
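
/* Editorial annotation (not part of the original source): a worked
 * example of the TX completion accounting in xcan_tx_interrupt() with
 * tx_max = 2:
 *
 *   tx_head = 7, tx_tail = 5  ->  frames_in_fifo = 2
 *
 * TXOK is cleared (repeatedly, if it reasserts) and ISR is re-read; if
 * TXFEMP is then set the FIFO drained completely and frames_sent = 2,
 * otherwise only one frame completed and frames_sent stays 1.  Each
 * completed frame echoes its skb and advances tx_tail, so the next
 * TXOK interrupt sees the remaining count.
 */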
1125 
1126 /**
1127  * xcan_interrupt - CAN Isr
1128  * @irq:	irq number
1129  * @dev_id:	device id pointer
1130  *
1131  * This is the Xilinx CAN ISR. It checks for the type of interrupt
1132  * and invokes the corresponding ISR.
1133  *
1134  * Return:
1135  * IRQ_NONE if no interrupt is pending, IRQ_HANDLED otherwise
1136  */
1137 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1138 {
1139 	struct net_device *ndev = (struct net_device *)dev_id;
1140 	struct xcan_priv *priv = netdev_priv(ndev);
1141 	u32 isr, ier;
1142 	u32 isr_errors;
1143 	u32 rx_int_mask = xcan_rx_int_mask(priv);
1144 
1145 	/* Get the interrupt status from Xilinx CAN */
1146 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1147 	if (!isr)
1148 		return IRQ_NONE;
1149 
1150 	/* Check for the type of interrupt and process it */
1151 	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1152 		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1153 				XCAN_IXR_WKUP_MASK));
1154 		xcan_state_interrupt(ndev, isr);
1155 	}
1156 
1157 	/* Check for Tx interrupt and process it */
1158 	if (isr & XCAN_IXR_TXOK_MASK)
1159 		xcan_tx_interrupt(ndev, isr);
1160 
1161 	/* Check for the type of error interrupt and process it */
1162 	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1163 			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1164 			    XCAN_IXR_RXMNF_MASK);
1165 	if (isr_errors) {
1166 		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1167 		xcan_err_interrupt(ndev, isr);
1168 	}
1169 
1170 	/* Check for the type of receive interrupt and process it */
1171 	if (isr & rx_int_mask) {
1172 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1173 		ier &= ~rx_int_mask;
1174 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1175 		napi_schedule(&priv->napi);
1176 	}
1177 	return IRQ_HANDLED;
1178 }
1179 
1180 /**
1181  * xcan_chip_stop - Driver stop routine
1182  * @ndev:	Pointer to net_device structure
1183  *
1184  * This is the driver's stop routine. It will disable the
1185  * interrupts and put the device into configuration mode.
1186  */
1187 static void xcan_chip_stop(struct net_device *ndev)
1188 {
1189 	struct xcan_priv *priv = netdev_priv(ndev);
1190 
1191 	/* Disable interrupts and leave the CAN core in configuration mode */
1192 	set_reset_mode(ndev);
1193 	priv->can.state = CAN_STATE_STOPPED;
1194 }
1195 
1196 /**
1197  * xcan_open - Driver open routine
1198  * @ndev:	Pointer to net_device structure
1199  *
1200  * This is the driver open routine.
1201  * Return: 0 on success and failure value on error
1202  */
1203 static int xcan_open(struct net_device *ndev)
1204 {
1205 	struct xcan_priv *priv = netdev_priv(ndev);
1206 	int ret;
1207 
1208 	ret = pm_runtime_get_sync(priv->dev);
1209 	if (ret < 0) {
1210 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1211 				__func__, ret);
1212 		return ret;
1213 	}
1214 
1215 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1216 			ndev->name, ndev);
1217 	if (ret < 0) {
1218 		netdev_err(ndev, "irq allocation for CAN failed\n");
1219 		goto err;
1220 	}
1221 
1222 	/* Set chip into reset mode */
1223 	ret = set_reset_mode(ndev);
1224 	if (ret < 0) {
1225 		netdev_err(ndev, "mode resetting failed!\n");
1226 		goto err_irq;
1227 	}
1228 
1229 	/* Common open */
1230 	ret = open_candev(ndev);
1231 	if (ret)
1232 		goto err_irq;
1233 
1234 	ret = xcan_chip_start(ndev);
1235 	if (ret < 0) {
1236 		netdev_err(ndev, "xcan_chip_start failed!\n");
1237 		goto err_candev;
1238 	}
1239 
1240 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
1241 	napi_enable(&priv->napi);
1242 	netif_start_queue(ndev);
1243 
1244 	return 0;
1245 
1246 err_candev:
1247 	close_candev(ndev);
1248 err_irq:
1249 	free_irq(ndev->irq, ndev);
1250 err:
1251 	pm_runtime_put(priv->dev);
1252 
1253 	return ret;
1254 }
1255 
1256 /**
1257  * xcan_close - Driver close routine
1258  * @ndev:	Pointer to net_device structure
1259  *
1260  * Return: 0 always
1261  */
1262 static int xcan_close(struct net_device *ndev)
1263 {
1264 	struct xcan_priv *priv = netdev_priv(ndev);
1265 
1266 	netif_stop_queue(ndev);
1267 	napi_disable(&priv->napi);
1268 	xcan_chip_stop(ndev);
1269 	free_irq(ndev->irq, ndev);
1270 	close_candev(ndev);
1271 
1272 	can_led_event(ndev, CAN_LED_EVENT_STOP);
1273 	pm_runtime_put(priv->dev);
1274 
1275 	return 0;
1276 }
1277 
1278 /**
1279  * xcan_get_berr_counter - error counter routine
1280  * @ndev:	Pointer to net_device structure
1281  * @bec:	Pointer to can_berr_counter structure
1282  *
1283  * This is the driver error counter routine.
1284  * Return: 0 on success and failure value on error
1285  */
1286 static int xcan_get_berr_counter(const struct net_device *ndev,
1287 					struct can_berr_counter *bec)
1288 {
1289 	struct xcan_priv *priv = netdev_priv(ndev);
1290 	int ret;
1291 
1292 	ret = pm_runtime_get_sync(priv->dev);
1293 	if (ret < 0) {
1294 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1295 				__func__, ret);
1296 		return ret;
1297 	}
1298 
1299 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1300 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1301 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1302 
1303 	pm_runtime_put(priv->dev);
1304 
1305 	return 0;
1306 }
1307 
1308 
1309 static const struct net_device_ops xcan_netdev_ops = {
1310 	.ndo_open	= xcan_open,
1311 	.ndo_stop	= xcan_close,
1312 	.ndo_start_xmit	= xcan_start_xmit,
1313 	.ndo_change_mtu	= can_change_mtu,
1314 };
1315 
1316 /**
1317  * xcan_suspend - Suspend method for the driver
1318  * @dev:	Address of the device structure
1319  *
1320  * Put the driver into low power mode.
1321  * Return: 0 on success and failure value on error
1322  */
1323 static int __maybe_unused xcan_suspend(struct device *dev)
1324 {
1325 	struct net_device *ndev = dev_get_drvdata(dev);
1326 
1327 	if (netif_running(ndev)) {
1328 		netif_stop_queue(ndev);
1329 		netif_device_detach(ndev);
1330 		xcan_chip_stop(ndev);
1331 	}
1332 
1333 	return pm_runtime_force_suspend(dev);
1334 }
1335 
1336 /**
1337  * xcan_resume - Resume from suspend
1338  * @dev:	Address of the device structure
1339  *
1340  * Resume operation after suspend.
1341  * Return: 0 on success and failure value on error
1342  */
1343 static int __maybe_unused xcan_resume(struct device *dev)
1344 {
1345 	struct net_device *ndev = dev_get_drvdata(dev);
1346 	int ret;
1347 
1348 	ret = pm_runtime_force_resume(dev);
1349 	if (ret) {
1350 		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1351 		return ret;
1352 	}
1353 
1354 	if (netif_running(ndev)) {
1355 		ret = xcan_chip_start(ndev);
1356 		if (ret) {
1357 			dev_err(dev, "xcan_chip_start failed on resume\n");
1358 			return ret;
1359 		}
1360 
1361 		netif_device_attach(ndev);
1362 		netif_start_queue(ndev);
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 /**
1369  * xcan_runtime_suspend - Runtime suspend method for the driver
1370  * @dev:	Address of the device structure
1371  *
1372  * Put the driver into low power mode.
1373  * Return: 0 always
1374  */
1375 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1376 {
1377 	struct net_device *ndev = dev_get_drvdata(dev);
1378 	struct xcan_priv *priv = netdev_priv(ndev);
1379 
1380 	clk_disable_unprepare(priv->bus_clk);
1381 	clk_disable_unprepare(priv->can_clk);
1382 
1383 	return 0;
1384 }
1385 
1386 /**
1387  * xcan_runtime_resume - Runtime resume from suspend
1388  * @dev:	Address of the device structure
1389  *
1390  * Resume operation after suspend.
1391  * Return: 0 on success and failure value on error
1392  */
1393 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1394 {
1395 	struct net_device *ndev = dev_get_drvdata(dev);
1396 	struct xcan_priv *priv = netdev_priv(ndev);
1397 	int ret;
1398 
1399 	ret = clk_prepare_enable(priv->bus_clk);
1400 	if (ret) {
1401 		dev_err(dev, "Cannot enable clock.\n");
1402 		return ret;
1403 	}
1404 	ret = clk_prepare_enable(priv->can_clk);
1405 	if (ret) {
1406 		dev_err(dev, "Cannot enable clock.\n");
1407 		clk_disable_unprepare(priv->bus_clk);
1408 		return ret;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
1414 static const struct dev_pm_ops xcan_dev_pm_ops = {
1415 	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
1416 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1417 };
1418 
1419 static const struct xcan_devtype_data xcan_zynq_data = {
	.flags = XCAN_FLAG_TXFEMP,
1420 	.bittiming_const = &xcan_bittiming_const,
1421 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1422 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1423 	.bus_clk_name = "pclk",
1424 };
1425 
1426 static const struct xcan_devtype_data xcan_axi_data = {
1427 	.bittiming_const = &xcan_bittiming_const,
1428 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1429 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1430 	.bus_clk_name = "s_axi_aclk",
1431 };
1432 
1433 static const struct xcan_devtype_data xcan_canfd_data = {
1434 	.flags = XCAN_FLAG_EXT_FILTERS |
1435 		 XCAN_FLAG_RXMNF |
1436 		 XCAN_FLAG_TX_MAILBOXES |
1437 		 XCAN_FLAG_RX_FIFO_MULTI,
1438 	.bittiming_const = &xcan_bittiming_const,
1439 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1440 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1441 	.bus_clk_name = "s_axi_aclk",
1442 };
1443 
1444 static const struct xcan_devtype_data xcan_canfd2_data = {
1445 	.flags = XCAN_FLAG_EXT_FILTERS |
1446 		 XCAN_FLAG_RXMNF |
1447 		 XCAN_FLAG_TX_MAILBOXES |
1448 		 XCAN_FLAG_CANFD_2 |
1449 		 XCAN_FLAG_RX_FIFO_MULTI,
1450 	.bittiming_const = &xcan_bittiming_const_canfd2,
1451 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1452 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1453 	.bus_clk_name = "s_axi_aclk",
1454 };
1455 
1456 /* Match table for OF platform binding */
1457 static const struct of_device_id xcan_of_match[] = {
1458 	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1459 	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1460 	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1461 	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
1462 	{ /* end of list */ },
1463 };
1464 MODULE_DEVICE_TABLE(of, xcan_of_match);
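
/* Editorial annotation (not part of the original source): a minimal,
 * hypothetical device tree node for the Zynq variant, limited to the
 * properties this driver reads; the address, interrupt and clock
 * phandles below are placeholders:
 *
 *	can0: can@e0008000 {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <0xe0008000 0x1000>;
 *		interrupts = <0 28 4>;
 *		clocks = <&clkc 19>, <&clkc 36>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 *
 * The CAN FD cores ("xlnx,canfd-1.0", "xlnx,canfd-2.0") use
 * "s_axi_aclk" as the bus clock name and a tx-mailbox-count property
 * instead of tx-fifo-depth.
 */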
1465 
1466 /**
1467  * xcan_probe - Platform registration call
1468  * @pdev:	Handle to the platform device structure
1469  *
1470  * This function does all the memory allocation and registration for the CAN
1471  * device.
1472  *
1473  * Return: 0 on success and failure value on error
1474  */
1475 static int xcan_probe(struct platform_device *pdev)
1476 {
1477 	struct resource *res; /* IO mem resources */
1478 	struct net_device *ndev;
1479 	struct xcan_priv *priv;
1480 	const struct of_device_id *of_id;
1481 	const struct xcan_devtype_data *devtype = &xcan_axi_data;
1482 	void __iomem *addr;
1483 	int ret;
1484 	int rx_max, tx_max;
1485 	int hw_tx_max, hw_rx_max;
1486 	const char *hw_tx_max_property;
1487 
1488 	/* Get the virtual base address for the device */
1489 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1490 	addr = devm_ioremap_resource(&pdev->dev, res);
1491 	if (IS_ERR(addr)) {
1492 		ret = PTR_ERR(addr);
1493 		goto err;
1494 	}
1495 
1496 	of_id = of_match_device(xcan_of_match, &pdev->dev);
1497 	if (of_id && of_id->data)
1498 		devtype = of_id->data;
1499 
1500 	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1501 			     "tx-mailbox-count" : "tx-fifo-depth";
1502 
1503 	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1504 				   &hw_tx_max);
1505 	if (ret < 0) {
1506 		dev_err(&pdev->dev, "missing %s property\n",
1507 			hw_tx_max_property);
1508 		goto err;
1509 	}
1510 
1511 	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1512 				   &hw_rx_max);
1513 	if (ret < 0) {
1514 		dev_err(&pdev->dev,
1515 			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
1516 		goto err;
1517 	}
1518 
1519 	/* With TX FIFO:
1520 	 *
1521 	 * There is no way to directly figure out how many frames have been
1522 	 * sent when the TXOK interrupt is processed. If TXFEMP
1523 	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1524 	 * to determine if 1 or 2 frames have been sent.
1525 	 * Theoretically we should be able to use TXFWMEMP to determine up
1526 	 * to 3 frames, but it seems that after putting a second frame in the
1527 	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1528 	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1529 	 * sent), which is not a sensible state - possibly TXFWMEMP is not
1530 	 * completely synchronized with the rest of the bits?
1531 	 *
1532 	 * With TX mailboxes:
1533 	 *
1534 	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1535 	 * we submit frames one at a time.
1536 	 */
1537 	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1538 	    (devtype->flags & XCAN_FLAG_TXFEMP))
1539 		tx_max = min(hw_tx_max, 2);
1540 	else
1541 		tx_max = 1;
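
	/* Editorial annotation (not part of the original source): examples
	 * of the resulting tx_max, assuming typical FIFO/mailbox depths:
	 *
	 *   Zynq CANPS (XCAN_FLAG_TXFEMP), tx-fifo-depth = 64  -> tx_max = 2
	 *   AXI CAN (no TXFEMP flag), tx-fifo-depth = 64       -> tx_max = 1
	 *   CAN FD cores (mailboxes), tx-mailbox-count = 32    -> tx_max = 1
	 */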
1542 
1543 	rx_max = hw_rx_max;
1544 
1545 	/* Create a CAN device instance */
1546 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1547 	if (!ndev)
1548 		return -ENOMEM;
1549 
1550 	priv = netdev_priv(ndev);
1551 	priv->dev = &pdev->dev;
1552 	priv->can.bittiming_const = devtype->bittiming_const;
1553 	priv->can.do_set_mode = xcan_do_set_mode;
1554 	priv->can.do_get_berr_counter = xcan_get_berr_counter;
1555 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1556 					CAN_CTRLMODE_BERR_REPORTING;
1557 	priv->reg_base = addr;
1558 	priv->tx_max = tx_max;
1559 	priv->devtype = *devtype;
1560 	spin_lock_init(&priv->tx_lock);
1561 
1562 	/* Get IRQ for the device */
1563 	ndev->irq = platform_get_irq(pdev, 0);
1564 	ndev->flags |= IFF_ECHO;	/* We support local echo */
1565 
1566 	platform_set_drvdata(pdev, ndev);
1567 	SET_NETDEV_DEV(ndev, &pdev->dev);
1568 	ndev->netdev_ops = &xcan_netdev_ops;
1569 
1570 	/* Getting the CAN can_clk info */
1571 	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1572 	if (IS_ERR(priv->can_clk)) {
1573 		dev_err(&pdev->dev, "Device clock not found.\n");
1574 		ret = PTR_ERR(priv->can_clk);
1575 		goto err_free;
1576 	}
1577 
1578 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1579 	if (IS_ERR(priv->bus_clk)) {
1580 		dev_err(&pdev->dev, "bus clock not found\n");
1581 		ret = PTR_ERR(priv->bus_clk);
1582 		goto err_free;
1583 	}
1584 
1585 	priv->write_reg = xcan_write_reg_le;
1586 	priv->read_reg = xcan_read_reg_le;
1587 
1588 	pm_runtime_enable(&pdev->dev);
1589 	ret = pm_runtime_get_sync(&pdev->dev);
1590 	if (ret < 0) {
1591 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1592 			__func__, ret);
1593 		goto err_pmdisable;
1594 	}
1595 
1596 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1597 		priv->write_reg = xcan_write_reg_be;
1598 		priv->read_reg = xcan_read_reg_be;
1599 	}
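
	/* Editorial annotation (not part of the original source): the
	 * endianness probe above relies on the core being in configuration
	 * mode at this point, where a correctly-ordered read of SR returns
	 * XCAN_SR_CONFIG_MASK (0x00000001).  If the register file is
	 * big-endian, the little-endian accessor returns the byte-swapped
	 * 0x01000000, the comparison fails, and the driver switches to the
	 * ioread32be()/iowrite32be() helpers.
	 */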
1600 
1601 	priv->can.clock.freq = clk_get_rate(priv->can_clk);
1602 
1603 	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1604 
1605 	ret = register_candev(ndev);
1606 	if (ret) {
1607 		dev_err(&pdev->dev, "failed to register CAN device (err=%d)\n", ret);
1608 		goto err_disableclks;
1609 	}
1610 
1611 	devm_can_led_init(ndev);
1612 
1613 	pm_runtime_put(&pdev->dev);
1614 
1615 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1616 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
1617 		   hw_tx_max, priv->tx_max);
1618 
1619 	return 0;
1620 
1621 err_disableclks:
1622 	pm_runtime_put(priv->dev);
1623 err_pmdisable:
1624 	pm_runtime_disable(&pdev->dev);
1625 err_free:
1626 	free_candev(ndev);
1627 err:
1628 	return ret;
1629 }
1630 
1631 /**
1632  * xcan_remove - Unregister the device after releasing the resources
1633  * @pdev:	Handle to the platform device structure
1634  *
1635  * This function frees all the resources allocated to the device.
1636  * Return: 0 always
1637  */
1638 static int xcan_remove(struct platform_device *pdev)
1639 {
1640 	struct net_device *ndev = platform_get_drvdata(pdev);
1641 	struct xcan_priv *priv = netdev_priv(ndev);
1642 
1643 	unregister_candev(ndev);
1644 	pm_runtime_disable(&pdev->dev);
1645 	netif_napi_del(&priv->napi);
1646 	free_candev(ndev);
1647 
1648 	return 0;
1649 }
1650 
1651 static struct platform_driver xcan_driver = {
1652 	.probe = xcan_probe,
1653 	.remove	= xcan_remove,
1654 	.driver	= {
1655 		.name = DRIVER_NAME,
1656 		.pm = &xcan_dev_pm_ops,
1657 		.of_match_table	= xcan_of_match,
1658 	},
1659 };
1660 
1661 module_platform_driver(xcan_driver);
1662 
1663 MODULE_LICENSE("GPL");
1664 MODULE_AUTHOR("Xilinx Inc");
1665 MODULE_DESCRIPTION("Xilinx CAN interface");
1666