xref: /openbmc/linux/drivers/net/can/xilinx_can.c (revision d5dbb2e8)
1 /* Xilinx CAN device driver
2  *
3  * Copyright (C) 2012 - 2014 Xilinx, Inc.
4  * Copyright (C) 2009 PetaLogix. All rights reserved.
5  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
6  *
7  * Description:
8  * This driver is developed for the AXI CAN IP and the Zynq CANPS controller.
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation, either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #include <linux/clk.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/netdevice.h>
28 #include <linux/of.h>
29 #include <linux/of_device.h>
30 #include <linux/platform_device.h>
31 #include <linux/skbuff.h>
32 #include <linux/spinlock.h>
33 #include <linux/string.h>
34 #include <linux/types.h>
35 #include <linux/can/dev.h>
36 #include <linux/can/error.h>
37 #include <linux/can/led.h>
38 #include <linux/pm_runtime.h>
39 
40 #define DRIVER_NAME	"xilinx_can"
41 
42 /* CAN registers set */
43 enum xcan_reg {
44 	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
45 	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
46 	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
47 	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
48 	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
49 	XCAN_ESR_OFFSET		= 0x14, /* Error status */
50 	XCAN_SR_OFFSET		= 0x18, /* Status */
51 	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
52 	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
53 	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
54 
55 	/* not on CAN FD cores */
56 	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
57 	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
58 	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */
59 
60 	/* only on CAN FD cores */
61 	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
62 	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
63 	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
64 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
65 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
66 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
67 };
68 
69 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
70 #define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
71 #define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
72 #define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
73 
74 #define XCAN_CANFD_FRAME_SIZE		0x48
75 #define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
76 					 XCAN_CANFD_FRAME_SIZE * (n))
77 #define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
78 					 XCAN_CANFD_FRAME_SIZE * (n))
79 #define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
80 					 XCAN_CANFD_FRAME_SIZE * (n))
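/* Each TX/RX message buffer on CAN FD cores occupies XCAN_CANFD_FRAME_SIZE
 * bytes, e.g. TX mailbox 1 starts at 0x0100 + 1 * 0x48 = 0x0148.
 */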
81 
82 /* the single TX mailbox used by this driver on CAN FD HW */
83 #define XCAN_TX_MAILBOX_IDX		0
84 
85 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
86 #define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
87 #define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
88 #define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
89 #define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
90 #define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
91 #define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
92 #define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
93 #define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
94 #define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
95 #define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
96 #define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
97 #define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
98 #define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
99 #define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
100 #define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
101 #define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
102 #define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
103 #define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
104 #define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
105 #define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
106 #define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
107 #define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
108 #define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
109 #define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
110 #define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
111 #define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
112 #define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
113 #define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
114 #define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
115 #define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
116 #define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
117 #define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
118 #define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
119 #define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
120 #define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
121 #define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
122 #define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
123 #define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
124 #define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
125 #define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
126 #define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
127 #define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
128 #define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
129 #define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
130 #define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
131 
132 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
133 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
134 #define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
135 #define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
136 #define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
137 #define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
138 #define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
139 #define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
140 #define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */
141 
142 /* CAN frame length constants */
143 #define XCAN_FRAME_MAX_DATA_LEN		8
144 #define XCAN_TIMEOUT			(1 * HZ)
145 
146 /* TX-FIFO-empty interrupt available */
147 #define XCAN_FLAG_TXFEMP	0x0001
148 /* RX Match Not Finished interrupt available */
149 #define XCAN_FLAG_RXMNF		0x0002
150 /* Extended acceptance filters with control at 0xE0 */
151 #define XCAN_FLAG_EXT_FILTERS	0x0004
152 /* TX mailboxes instead of TX FIFO */
153 #define XCAN_FLAG_TX_MAILBOXES	0x0008
154 /* RX FIFO with each buffer in separate registers at 0x1100
155  * instead of the regular FIFO at 0x50
156  */
157 #define XCAN_FLAG_RX_FIFO_MULTI	0x0010
158 #define XCAN_FLAG_CANFD_2	0x0020
159 
160 struct xcan_devtype_data {
161 	unsigned int flags;
162 	const struct can_bittiming_const *bittiming_const;
163 	const char *bus_clk_name;
164 	unsigned int btr_ts2_shift;
165 	unsigned int btr_sjw_shift;
166 };
167 
168 /**
169  * struct xcan_priv - CAN driver instance private data
170  * @can:			CAN private data structure.
171  * @tx_lock:			Lock for synchronizing TX interrupt handling
172  * @tx_head:			Tx CAN packets ready to send on the queue
173  * @tx_tail:			Tx CAN packets successfully sent on the queue
174  * @tx_max:			Maximum number of packets the driver can send
175  * @napi:			NAPI structure
176  * @read_reg:			For reading data from CAN registers
177  * @write_reg:			For writing data to CAN registers
178  * @dev:			Network device data structure
179  * @reg_base:			Ioremapped address to registers
180  * @irq_flags:			For request_irq()
181  * @bus_clk:			Pointer to struct clk
182  * @can_clk:			Pointer to struct clk
183  * @devtype:			Device type specific constants
184  */
185 struct xcan_priv {
186 	struct can_priv can;
187 	spinlock_t tx_lock;
188 	unsigned int tx_head;
189 	unsigned int tx_tail;
190 	unsigned int tx_max;
191 	struct napi_struct napi;
192 	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
193 	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
194 			u32 val);
195 	struct device *dev;
196 	void __iomem *reg_base;
197 	unsigned long irq_flags;
198 	struct clk *bus_clk;
199 	struct clk *can_clk;
200 	struct xcan_devtype_data devtype;
201 };
202 
203 /* CAN Bittiming constants as per Xilinx CAN specs */
204 static const struct can_bittiming_const xcan_bittiming_const = {
205 	.name = DRIVER_NAME,
206 	.tseg1_min = 1,
207 	.tseg1_max = 16,
208 	.tseg2_min = 1,
209 	.tseg2_max = 8,
210 	.sjw_max = 4,
211 	.brp_min = 1,
212 	.brp_max = 256,
213 	.brp_inc = 1,
214 };
215 
216 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
217 	.name = DRIVER_NAME,
218 	.tseg1_min = 1,
219 	.tseg1_max = 64,
220 	.tseg2_min = 1,
221 	.tseg2_max = 16,
222 	.sjw_max = 16,
223 	.brp_min = 1,
224 	.brp_max = 256,
225 	.brp_inc = 1,
226 };
227 
228 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
229 	.name = DRIVER_NAME,
230 	.tseg1_min = 1,
231 	.tseg1_max = 256,
232 	.tseg2_min = 1,
233 	.tseg2_max = 128,
234 	.sjw_max = 128,
235 	.brp_min = 1,
236 	.brp_max = 256,
237 	.brp_inc = 1,
238 };
239 
240 /**
241  * xcan_write_reg_le - Write a value to the device register little endian
242  * @priv:	Driver private data structure
243  * @reg:	Register offset
244  * @val:	Value to write at the Register offset
245  *
246  * Write data to the particular CAN register
247  */
248 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
249 			u32 val)
250 {
251 	iowrite32(val, priv->reg_base + reg);
252 }
253 
254 /**
255  * xcan_read_reg_le - Read a value from the device register little endian
256  * @priv:	Driver private data structure
257  * @reg:	Register offset
258  *
259  * Read data from the particular CAN register
260  * Return: value read from the CAN register
261  */
262 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
263 {
264 	return ioread32(priv->reg_base + reg);
265 }
266 
267 /**
268  * xcan_write_reg_be - Write a value to the device register big endian
269  * @priv:	Driver private data structure
270  * @reg:	Register offset
271  * @val:	Value to write at the Register offset
272  *
273  * Write data to the particular CAN register
274  */
275 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
276 			u32 val)
277 {
278 	iowrite32be(val, priv->reg_base + reg);
279 }
280 
281 /**
282  * xcan_read_reg_be - Read a value from the device register big endian
283  * @priv:	Driver private data structure
284  * @reg:	Register offset
285  *
286  * Read data from the particular CAN register
287  * Return: value read from the CAN register
288  */
289 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
290 {
291 	return ioread32be(priv->reg_base + reg);
292 }
293 
294 /**
295  * xcan_rx_int_mask - Get the mask for the receive interrupt
296  * @priv:	Driver private data structure
297  *
298  * Return: The receive interrupt mask used by the driver on this HW
299  */
300 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
301 {
302 	/* RXNEMP is better suited for our use case as it cannot be cleared
303 	 * while the FIFO is non-empty, but CAN FD HW does not have it
304 	 */
305 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
306 		return XCAN_IXR_RXOK_MASK;
307 	else
308 		return XCAN_IXR_RXNEMP_MASK;
309 }
310 
311 /**
312  * set_reset_mode - Resets the CAN device mode
313  * @ndev:	Pointer to net_device structure
314  *
315  * This is the driver reset mode routine. The driver
316  * enters configuration mode.
317  *
318  * Return: 0 on success and failure value on error
319  */
320 static int set_reset_mode(struct net_device *ndev)
321 {
322 	struct xcan_priv *priv = netdev_priv(ndev);
323 	unsigned long timeout;
324 
325 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
326 
327 	timeout = jiffies + XCAN_TIMEOUT;
328 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
329 		if (time_after(jiffies, timeout)) {
330 			netdev_warn(ndev, "timed out for config mode\n");
331 			return -ETIMEDOUT;
332 		}
333 		usleep_range(500, 10000);
334 	}
335 
336 	/* reset clears FIFOs */
337 	priv->tx_head = 0;
338 	priv->tx_tail = 0;
339 
340 	return 0;
341 }
342 
343 /**
344  * xcan_set_bittiming - CAN set bit timing routine
345  * @ndev:	Pointer to net_device structure
346  *
347  * This is the driver set bittiming routine.
348  * Return: 0 on success and failure value on error
349  */
350 static int xcan_set_bittiming(struct net_device *ndev)
351 {
352 	struct xcan_priv *priv = netdev_priv(ndev);
353 	struct can_bittiming *bt = &priv->can.bittiming;
354 	u32 btr0, btr1;
355 	u32 is_config_mode;
356 
357 	/* Check whether Xilinx CAN is in configuration mode.
358 	 * Bit timing cannot be changed while the controller is not in config mode.
359 	 */
360 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
361 				XCAN_SR_CONFIG_MASK;
362 	if (!is_config_mode) {
363 		netdev_alert(ndev,
364 		     "BUG! Cannot set bittiming - CAN is not in config mode\n");
365 		return -EPERM;
366 	}
367 
368 	/* Setting Baud Rate prescaler value in BRPR Register */
369 	btr0 = (bt->brp - 1);
370 
371 	/* Setting Time Segment 1 in BTR Register */
372 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
373 
374 	/* Setting Time Segment 2 in BTR Register */
375 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
376 
377 	/* Setting Synchronous jump width in BTR Register */
378 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
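	/* Illustrative example (actual values depend on the requested bit rate):
	 * with prop_seg + phase_seg1 = 7, phase_seg2 = 2 and sjw = 1 on a Zynq
	 * core (ts2 shift 4, sjw shift 7) this yields BTR = 0x16.
	 */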
379 
380 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
381 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
382 
383 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
384 			priv->read_reg(priv, XCAN_BRPR_OFFSET),
385 			priv->read_reg(priv, XCAN_BTR_OFFSET));
386 
387 	return 0;
388 }
389 
390 /**
391  * xcan_chip_start - Driver start routine
392  * @ndev:	Pointer to net_device structure
393  *
394  * This is the driver start routine.
395  * Based on the State of the CAN device it puts
396  * the CAN device into a proper mode.
397  *
398  * Return: 0 on success and failure value on error
399  */
400 static int xcan_chip_start(struct net_device *ndev)
401 {
402 	struct xcan_priv *priv = netdev_priv(ndev);
403 	u32 reg_msr, reg_sr_mask;
404 	int err;
405 	unsigned long timeout;
406 	u32 ier;
407 
408 	/* Check if it is in reset mode */
409 	err = set_reset_mode(ndev);
410 	if (err < 0)
411 		return err;
412 
413 	err = xcan_set_bittiming(ndev);
414 	if (err < 0)
415 		return err;
416 
417 	/* Enable interrupts */
418 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
419 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
420 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
421 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
422 
423 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
424 		ier |= XCAN_IXR_RXMNF_MASK;
425 
426 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
427 
428 	/* Check whether it is loopback mode or normal mode */
429 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
430 		reg_msr = XCAN_MSR_LBACK_MASK;
431 		reg_sr_mask = XCAN_SR_LBACK_MASK;
432 	} else {
433 		reg_msr = 0x0;
434 		reg_sr_mask = XCAN_SR_NORMAL_MASK;
435 	}
436 
437 	/* enable the first extended filter, if any, as cores with extended
438 	 * filtering default to non-receipt if all filters are disabled
439 	 */
440 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
441 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
442 
443 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
444 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
445 
446 	timeout = jiffies + XCAN_TIMEOUT;
447 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
448 		if (time_after(jiffies, timeout)) {
449 			netdev_warn(ndev,
450 				"timed out for correct mode\n");
451 			return -ETIMEDOUT;
452 		}
453 	}
454 	netdev_dbg(ndev, "status:0x%08x\n",
455 			priv->read_reg(priv, XCAN_SR_OFFSET));
456 
457 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
458 	return 0;
459 }
460 
461 /**
462  * xcan_do_set_mode - This sets the mode of the driver
463  * @ndev:	Pointer to net_device structure
464  * @mode:	Tells the mode of the driver
465  *
466  * This checks the driver state and calls
467  * the corresponding mode-setting routine.
468  *
469  * Return: 0 on success and failure value on error
470  */
471 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
472 {
473 	int ret;
474 
475 	switch (mode) {
476 	case CAN_MODE_START:
477 		ret = xcan_chip_start(ndev);
478 		if (ret < 0) {
479 			netdev_err(ndev, "xcan_chip_start failed!\n");
480 			return ret;
481 		}
482 		netif_wake_queue(ndev);
483 		break;
484 	default:
485 		ret = -EOPNOTSUPP;
486 		break;
487 	}
488 
489 	return ret;
490 }
491 
492 /**
493  * xcan_write_frame - Write a frame to HW
 * @priv:		Driver private data structure
494  * @skb:		sk_buff pointer that contains data to be Txed
495  * @frame_offset:	Register offset to write the frame to
496  */
497 static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
498 			     int frame_offset)
499 {
500 	u32 id, dlc, data[2] = {0, 0};
501 	struct can_frame *cf = (struct can_frame *)skb->data;
502 
503 	/* Map the CAN ID into the Xilinx IDR register bit layout */
504 	if (cf->can_id & CAN_EFF_FLAG) {
505 		/* Extended CAN ID format */
506 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
507 			XCAN_IDR_ID2_MASK;
508 		id |= (((cf->can_id & CAN_EFF_MASK) >>
509 			(CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
510 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
511 
512 		/* The substitute remote TX request bit should be "1"
513 		 * for extended frames as in the Xilinx CAN datasheet
514 		 */
515 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
516 
517 		if (cf->can_id & CAN_RTR_FLAG)
518 			/* Extended frames remote TX request */
519 			id |= XCAN_IDR_RTR_MASK;
520 	} else {
521 		/* Standard CAN ID format */
522 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
523 			XCAN_IDR_ID1_MASK;
524 
525 		if (cf->can_id & CAN_RTR_FLAG)
526 			/* Standard frames remote TX request */
527 			id |= XCAN_IDR_SRR_MASK;
528 	}
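	/* For example, a standard frame with CAN ID 0x123 ends up as
	 * 0x123 << 21 = 0x24600000 in the ID register.
	 */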
529 
530 	dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
531 
532 	if (cf->can_dlc > 0)
533 		data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
534 	if (cf->can_dlc > 4)
535 		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
536 
537 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
538 	/* If the CAN frame is RTR frame this write triggers transmission
539 	 * (not on CAN FD)
540 	 */
541 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
542 	if (!(cf->can_id & CAN_RTR_FLAG)) {
543 		priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
544 				data[0]);
545 		/* If the CAN frame is Standard/Extended frame this
546 		 * write triggers transmission (not on CAN FD)
547 		 */
548 		priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
549 				data[1]);
550 	}
551 }
552 
553 /**
554  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
555  *
556  * Return: 0 on success, -ENOSPC if FIFO is full.
557  */
558 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
559 {
560 	struct xcan_priv *priv = netdev_priv(ndev);
561 	unsigned long flags;
562 
563 	/* Check if the TX buffer is full */
564 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
565 			XCAN_SR_TXFLL_MASK))
566 		return -ENOSPC;
567 
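	/* Echo skbs are stored at tx_head % tx_max so that xcan_tx_interrupt()
	 * can release them in FIFO order via tx_tail % tx_max.
	 */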
568 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
569 
570 	spin_lock_irqsave(&priv->tx_lock, flags);
571 
572 	priv->tx_head++;
573 
574 	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
575 
576 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
577 	if (priv->tx_max > 1)
578 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
579 
580 	/* Check if the TX buffer is full */
581 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
582 		netif_stop_queue(ndev);
583 
584 	spin_unlock_irqrestore(&priv->tx_lock, flags);
585 
586 	return 0;
587 }
588 
589 /**
590  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
591  *
592  * Return: 0 on success, -ENOSPC if there is no space
593  */
594 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
595 {
596 	struct xcan_priv *priv = netdev_priv(ndev);
597 	unsigned long flags;
598 
599 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
600 		     BIT(XCAN_TX_MAILBOX_IDX)))
601 		return -ENOSPC;
602 
603 	can_put_echo_skb(skb, ndev, 0);
604 
605 	spin_lock_irqsave(&priv->tx_lock, flags);
606 
607 	priv->tx_head++;
608 
609 	xcan_write_frame(priv, skb,
610 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
611 
612 	/* Mark buffer as ready for transmit */
613 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
614 
615 	netif_stop_queue(ndev);
616 
617 	spin_unlock_irqrestore(&priv->tx_lock, flags);
618 
619 	return 0;
620 }
621 
622 /**
623  * xcan_start_xmit - Starts the transmission
624  * @skb:	sk_buff pointer that contains data to be Txed
625  * @ndev:	Pointer to net_device structure
626  *
627  * This function is invoked from upper layers to initiate transmission.
628  *
629  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
630  */
631 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
632 {
633 	struct xcan_priv *priv = netdev_priv(ndev);
634 	int ret;
635 
636 	if (can_dropped_invalid_skb(ndev, skb))
637 		return NETDEV_TX_OK;
638 
639 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
640 		ret = xcan_start_xmit_mailbox(skb, ndev);
641 	else
642 		ret = xcan_start_xmit_fifo(skb, ndev);
643 
644 	if (ret < 0) {
645 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
646 		netif_stop_queue(ndev);
647 		return NETDEV_TX_BUSY;
648 	}
649 
650 	return NETDEV_TX_OK;
651 }
652 
653 /**
654  * xcan_rx - Is called from CAN ISR to complete the received
655  *		frame processing
656  * @ndev:	Pointer to net_device structure
657  * @frame_base:	Register offset to the frame to be read
658  *
659  * This function is invoked from the CAN ISR (poll) to process the RX frames. It
660  * does minimal processing and invokes "netif_receive_skb" to complete further
661  * processing.
662  * Return: 1 on success and 0 on failure.
663  */
664 static int xcan_rx(struct net_device *ndev, int frame_base)
665 {
666 	struct xcan_priv *priv = netdev_priv(ndev);
667 	struct net_device_stats *stats = &ndev->stats;
668 	struct can_frame *cf;
669 	struct sk_buff *skb;
670 	u32 id_xcan, dlc, data[2] = {0, 0};
671 
672 	skb = alloc_can_skb(ndev, &cf);
673 	if (unlikely(!skb)) {
674 		stats->rx_dropped++;
675 		return 0;
676 	}
677 
678 	/* Read a frame from the Xilinx CAN controller */
679 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
680 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
681 				   XCAN_DLCR_DLC_SHIFT;
682 
683 	/* Change Xilinx CAN data length format to socketCAN data format */
684 	cf->can_dlc = get_can_dlc(dlc);
685 
686 	/* Change Xilinx CAN ID format to socketCAN ID format */
687 	if (id_xcan & XCAN_IDR_IDE_MASK) {
688 		/* The received frame is an Extended format frame */
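		/* ID1 holds bits 28:18 of the 29-bit extended ID at register
		 * bits 31:21, hence the shift right by 3.
		 */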
689 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
690 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
691 				XCAN_IDR_ID2_SHIFT;
692 		cf->can_id |= CAN_EFF_FLAG;
693 		if (id_xcan & XCAN_IDR_RTR_MASK)
694 			cf->can_id |= CAN_RTR_FLAG;
695 	} else {
696 		/* The received frame is a standard format frame */
697 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
698 				XCAN_IDR_ID1_SHIFT;
699 		if (id_xcan & XCAN_IDR_SRR_MASK)
700 			cf->can_id |= CAN_RTR_FLAG;
701 	}
702 
703 	/* DW1/DW2 must always be read to remove message from RXFIFO */
704 	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
705 	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
706 
707 	if (!(cf->can_id & CAN_RTR_FLAG)) {
708 		/* Change Xilinx CAN data format to socketCAN data format */
709 		if (cf->can_dlc > 0)
710 			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
711 		if (cf->can_dlc > 4)
712 			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
713 	}
714 
715 	stats->rx_bytes += cf->can_dlc;
716 	stats->rx_packets++;
717 	netif_receive_skb(skb);
718 
719 	return 1;
720 }
721 
722 /**
723  * xcan_current_error_state - Get current error state from HW
724  * @ndev:	Pointer to net_device structure
725  *
726  * Checks the current CAN error state from the HW. Note that this
727  * only checks for ERROR_PASSIVE and ERROR_WARNING.
728  *
729  * Return:
730  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
731  * otherwise.
732  */
733 static enum can_state xcan_current_error_state(struct net_device *ndev)
734 {
735 	struct xcan_priv *priv = netdev_priv(ndev);
736 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
737 
738 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
739 		return CAN_STATE_ERROR_PASSIVE;
740 	else if (status & XCAN_SR_ERRWRN_MASK)
741 		return CAN_STATE_ERROR_WARNING;
742 	else
743 		return CAN_STATE_ERROR_ACTIVE;
744 }
745 
746 /**
747  * xcan_set_error_state - Set new CAN error state
748  * @ndev:	Pointer to net_device structure
749  * @new_state:	The new CAN state to be set
750  * @cf:		Error frame to be populated or NULL
751  *
752  * Set new CAN error state for the device, updating statistics and
753  * populating the error frame if given.
754  */
755 static void xcan_set_error_state(struct net_device *ndev,
756 				 enum can_state new_state,
757 				 struct can_frame *cf)
758 {
759 	struct xcan_priv *priv = netdev_priv(ndev);
760 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
761 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
762 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
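	/* Attribute the new state to the direction(s) with the higher error
	 * counter; 0 means no state change is reported for that direction.
	 */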
763 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
764 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
765 
766 	/* non-ERROR states are handled elsewhere */
767 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
768 		return;
769 
770 	can_change_state(ndev, cf, tx_state, rx_state);
771 
772 	if (cf) {
773 		cf->data[6] = txerr;
774 		cf->data[7] = rxerr;
775 	}
776 }
777 
778 /**
779  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
780  * @ndev:	Pointer to net_device structure
781  *
782  * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
783  * the performed RX/TX has caused it to drop to a lesser state and set
784  * the interface state accordingly.
785  */
786 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
787 {
788 	struct xcan_priv *priv = netdev_priv(ndev);
789 	enum can_state old_state = priv->can.state;
790 	enum can_state new_state;
791 
792 	/* changing error state due to successful frame RX/TX can only
793 	 * occur from these states
794 	 */
795 	if (old_state != CAN_STATE_ERROR_WARNING &&
796 	    old_state != CAN_STATE_ERROR_PASSIVE)
797 		return;
798 
799 	new_state = xcan_current_error_state(ndev);
800 
801 	if (new_state != old_state) {
802 		struct sk_buff *skb;
803 		struct can_frame *cf;
804 
805 		skb = alloc_can_err_skb(ndev, &cf);
806 
807 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
808 
809 		if (skb) {
810 			struct net_device_stats *stats = &ndev->stats;
811 
812 			stats->rx_packets++;
813 			stats->rx_bytes += cf->can_dlc;
814 			netif_rx(skb);
815 		}
816 	}
817 }
818 
819 /**
820  * xcan_err_interrupt - error frame ISR
821  * @ndev:	net_device pointer
822  * @isr:	interrupt status register value
823  *
824  * This is the CAN error interrupt and it will
825  * check the type of error and forward the error
826  * frame to upper layers.
827  */
828 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
829 {
830 	struct xcan_priv *priv = netdev_priv(ndev);
831 	struct net_device_stats *stats = &ndev->stats;
832 	struct can_frame *cf;
833 	struct sk_buff *skb;
834 	u32 err_status;
835 
836 	skb = alloc_can_err_skb(ndev, &cf);
837 
838 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
839 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
840 
841 	if (isr & XCAN_IXR_BSOFF_MASK) {
842 		priv->can.state = CAN_STATE_BUS_OFF;
843 		priv->can.can_stats.bus_off++;
844 		/* Leave device in Config Mode in bus-off state */
845 		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
846 		can_bus_off(ndev);
847 		if (skb)
848 			cf->can_id |= CAN_ERR_BUSOFF;
849 	} else {
850 		enum can_state new_state = xcan_current_error_state(ndev);
851 
852 		if (new_state != priv->can.state)
853 			xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
854 	}
855 
856 	/* Check for Arbitration lost interrupt */
857 	if (isr & XCAN_IXR_ARBLST_MASK) {
858 		priv->can.can_stats.arbitration_lost++;
859 		if (skb) {
860 			cf->can_id |= CAN_ERR_LOSTARB;
861 			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
862 		}
863 	}
864 
865 	/* Check for RX FIFO Overflow interrupt */
866 	if (isr & XCAN_IXR_RXOFLW_MASK) {
867 		stats->rx_over_errors++;
868 		stats->rx_errors++;
869 		if (skb) {
870 			cf->can_id |= CAN_ERR_CRTL;
871 			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
872 		}
873 	}
874 
875 	/* Check for RX Match Not Finished interrupt */
876 	if (isr & XCAN_IXR_RXMNF_MASK) {
877 		stats->rx_dropped++;
878 		stats->rx_errors++;
879 		netdev_err(ndev, "RX match not finished, frame discarded\n");
880 		if (skb) {
881 			cf->can_id |= CAN_ERR_CRTL;
882 			cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
883 		}
884 	}
885 
886 	/* Check for error interrupt */
887 	if (isr & XCAN_IXR_ERROR_MASK) {
888 		if (skb)
889 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
890 
891 		/* Check for Ack error interrupt */
892 		if (err_status & XCAN_ESR_ACKER_MASK) {
893 			stats->tx_errors++;
894 			if (skb) {
895 				cf->can_id |= CAN_ERR_ACK;
896 				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
897 			}
898 		}
899 
900 		/* Check for Bit error interrupt */
901 		if (err_status & XCAN_ESR_BERR_MASK) {
902 			stats->tx_errors++;
903 			if (skb) {
904 				cf->can_id |= CAN_ERR_PROT;
905 				cf->data[2] = CAN_ERR_PROT_BIT;
906 			}
907 		}
908 
909 		/* Check for Stuff error interrupt */
910 		if (err_status & XCAN_ESR_STER_MASK) {
911 			stats->rx_errors++;
912 			if (skb) {
913 				cf->can_id |= CAN_ERR_PROT;
914 				cf->data[2] = CAN_ERR_PROT_STUFF;
915 			}
916 		}
917 
918 		/* Check for Form error interrupt */
919 		if (err_status & XCAN_ESR_FMER_MASK) {
920 			stats->rx_errors++;
921 			if (skb) {
922 				cf->can_id |= CAN_ERR_PROT;
923 				cf->data[2] = CAN_ERR_PROT_FORM;
924 			}
925 		}
926 
927 		/* Check for CRC error interrupt */
928 		if (err_status & XCAN_ESR_CRCER_MASK) {
929 			stats->rx_errors++;
930 			if (skb) {
931 				cf->can_id |= CAN_ERR_PROT;
932 				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
933 			}
934 		}
935 		priv->can.can_stats.bus_error++;
936 	}
937 
938 	if (skb) {
939 		stats->rx_packets++;
940 		stats->rx_bytes += cf->can_dlc;
941 		netif_rx(skb);
942 	}
943 
944 	netdev_dbg(ndev, "%s: error status register:0x%x\n",
945 			__func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
946 }
947 
948 /**
949  * xcan_state_interrupt - Checks the state of the CAN device
950  * @ndev:	net_device pointer
951  * @isr:	interrupt status register value
952  *
953  * This checks the state of the CAN device
954  * and puts the device into the appropriate state.
955  */
956 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
957 {
958 	struct xcan_priv *priv = netdev_priv(ndev);
959 
960 	/* If the Sleep interrupt is set, put the CAN device in sleep state */
961 	if (isr & XCAN_IXR_SLP_MASK)
962 		priv->can.state = CAN_STATE_SLEEPING;
963 
964 	/* If the Wake up interrupt is set, put the CAN device in error-active state */
965 	if (isr & XCAN_IXR_WKUP_MASK)
966 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
967 }
968 
969 /**
970  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
971  *
972  * Return: Register offset of the next frame in RX FIFO.
973  */
974 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
975 {
976 	int offset;
977 
978 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
979 		u32 fsr;
980 
981 		/* clear RXOK before the is-empty check so that any newly
982 		 * received frame will reassert it without a race
983 		 */
984 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
985 
986 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
987 
988 		/* check if RX FIFO is empty */
989 		if (!(fsr & XCAN_FSR_FL_MASK))
990 			return -ENOENT;
991 
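		/* Each buffer is XCAN_CANFD_FRAME_SIZE bytes; e.g. read index
		 * 3 on a CAN FD 2.0 core maps to 0x2100 + 3 * 0x48 = 0x21D8.
		 */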
992 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
993 			offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
994 		else
995 			offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
996 
997 	} else {
998 		/* check if RX FIFO is empty */
999 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1000 		      XCAN_IXR_RXNEMP_MASK))
1001 			return -ENOENT;
1002 
1003 		/* frames are read from a static offset */
1004 		offset = XCAN_RXFIFO_OFFSET;
1005 	}
1006 
1007 	return offset;
1008 }
1009 
1010 /**
1011  * xcan_rx_poll - Poll routine for rx packets (NAPI)
1012  * @napi:	napi structure pointer
1013  * @quota:	Max number of rx packets to be processed.
1014  *
1015  * This is the NAPI poll routine for the RX path.
1016  * It processes at most quota packets per invocation.
1017  *
1018  * Return: number of packets received
1019  */
1020 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1021 {
1022 	struct net_device *ndev = napi->dev;
1023 	struct xcan_priv *priv = netdev_priv(ndev);
1024 	u32 ier;
1025 	int work_done = 0;
1026 	int frame_offset;
1027 
1028 	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1029 	       (work_done < quota)) {
1030 		work_done += xcan_rx(ndev, frame_offset);
1031 
1032 		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1033 			/* increment read index */
1034 			priv->write_reg(priv, XCAN_FSR_OFFSET,
1035 					XCAN_FSR_IRI_MASK);
1036 		else
1037 			/* clear rx-not-empty (will actually clear only if
1038 			 * empty)
1039 			 */
1040 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1041 					XCAN_IXR_RXNEMP_MASK);
1042 	}
1043 
1044 	if (work_done) {
1045 		can_led_event(ndev, CAN_LED_EVENT_RX);
1046 		xcan_update_error_state_after_rxtx(ndev);
1047 	}
1048 
1049 	if (work_done < quota) {
1050 		napi_complete_done(napi, work_done);
1051 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1052 		ier |= xcan_rx_int_mask(priv);
1053 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1054 	}
1055 	return work_done;
1056 }
1057 
1058 /**
1059  * xcan_tx_interrupt - Tx Done Isr
1060  * @ndev:	net_device pointer
1061  * @isr:	Interrupt status register value
1062  */
1063 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1064 {
1065 	struct xcan_priv *priv = netdev_priv(ndev);
1066 	struct net_device_stats *stats = &ndev->stats;
1067 	unsigned int frames_in_fifo;
1068 	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1069 	unsigned long flags;
1070 	int retries = 0;
1071 
1072 	/* Synchronize with xmit as we need to know the exact number
1073 	 * of frames in the FIFO to stay in sync due to the TXFEMP
1074 	 * handling.
1075 	 * This also prevents a race between netif_wake_queue() and
1076 	 * netif_stop_queue().
1077 	 */
1078 	spin_lock_irqsave(&priv->tx_lock, flags);
1079 
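	/* tx_head and tx_tail are free-running counters; their difference
	 * is the number of frames currently queued in the HW TX FIFO.
	 */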
1080 	frames_in_fifo = priv->tx_head - priv->tx_tail;
1081 
1082 	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1083 		/* clear TXOK anyway to avoid getting back here */
1084 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1085 		spin_unlock_irqrestore(&priv->tx_lock, flags);
1086 		return;
1087 	}
1088 
1089 	/* Check if 2 frames were sent (TXOK only means that at least 1
1090 	 * frame was sent).
1091 	 */
1092 	if (frames_in_fifo > 1) {
1093 		WARN_ON(frames_in_fifo > priv->tx_max);
1094 
1095 		/* Synchronize TXOK and isr so that after the loop:
1096 		 * (1) isr variable is up-to-date at least up to TXOK clear
1097 		 *     time. This avoids us clearing a TXOK of a second frame
1098 		 *     but not noticing that the FIFO is now empty and thus
1099 		 *     marking only a single frame as sent.
1100 		 * (2) No TXOK is left. Having one could mean leaving a
1101 		 *     stray TXOK as we might process the associated frame
1102 		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
1103 		 *     clear to satisfy (1).
1104 		 */
1105 		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
1106 			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1107 			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1108 		}
1109 
1110 		if (isr & XCAN_IXR_TXFEMP_MASK) {
1111 			/* nothing in FIFO anymore */
1112 			frames_sent = frames_in_fifo;
1113 		}
1114 	} else {
1115 		/* single frame in fifo, just clear TXOK */
1116 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1117 	}
1118 
1119 	while (frames_sent--) {
1120 		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1121 						    priv->tx_max);
1122 		priv->tx_tail++;
1123 		stats->tx_packets++;
1124 	}
1125 
1126 	netif_wake_queue(ndev);
1127 
1128 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1129 
1130 	can_led_event(ndev, CAN_LED_EVENT_TX);
1131 	xcan_update_error_state_after_rxtx(ndev);
1132 }
1133 
1134 /**
1135  * xcan_interrupt - CAN ISR
1136  * @irq:	irq number
1137  * @dev_id:	device id pointer
1138  *
1139  * This is the Xilinx CAN ISR. It checks the type of interrupt
1140  * and invokes the corresponding ISR.
1141  *
1142  * Return:
1143  * IRQ_NONE - If no interrupt is pending, IRQ_HANDLED otherwise
1144  */
1145 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1146 {
1147 	struct net_device *ndev = (struct net_device *)dev_id;
1148 	struct xcan_priv *priv = netdev_priv(ndev);
1149 	u32 isr, ier;
1150 	u32 isr_errors;
1151 	u32 rx_int_mask = xcan_rx_int_mask(priv);
1152 
1153 	/* Get the interrupt status from Xilinx CAN */
1154 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1155 	if (!isr)
1156 		return IRQ_NONE;
1157 
1158 	/* Check the type of interrupt and process it */
1159 	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1160 		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1161 				XCAN_IXR_WKUP_MASK));
1162 		xcan_state_interrupt(ndev, isr);
1163 	}
1164 
1165 	/* Check for the TX interrupt and process it */
1166 	if (isr & XCAN_IXR_TXOK_MASK)
1167 		xcan_tx_interrupt(ndev, isr);
1168 
1169 	/* Check for error interrupts and process them */
1170 	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1171 			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1172 			    XCAN_IXR_RXMNF_MASK);
1173 	if (isr_errors) {
1174 		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1175 		xcan_err_interrupt(ndev, isr);
1176 	}
1177 
1178 	/* Check for the receive interrupt and process it */
1179 	if (isr & rx_int_mask) {
1180 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1181 		ier &= ~rx_int_mask;
1182 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1183 		napi_schedule(&priv->napi);
1184 	}
1185 	return IRQ_HANDLED;
1186 }
1187 
1188 /**
1189  * xcan_chip_stop - Driver stop routine
1190  * @ndev:	Pointer to net_device structure
1191  *
1192  * This is the driver stop routine. It will disable the
1193  * interrupts and put the device into configuration mode.
1194  */
1195 static void xcan_chip_stop(struct net_device *ndev)
1196 {
1197 	struct xcan_priv *priv = netdev_priv(ndev);
1198 
1199 	/* Disable interrupts and leave the can in configuration mode */
1200 	set_reset_mode(ndev);
1201 	priv->can.state = CAN_STATE_STOPPED;
1202 }
1203 
1204 /**
1205  * xcan_open - Driver open routine
1206  * @ndev:	Pointer to net_device structure
1207  *
1208  * This is the driver open routine.
1209  * Return: 0 on success and failure value on error
1210  */
1211 static int xcan_open(struct net_device *ndev)
1212 {
1213 	struct xcan_priv *priv = netdev_priv(ndev);
1214 	int ret;
1215 
1216 	ret = pm_runtime_get_sync(priv->dev);
1217 	if (ret < 0) {
1218 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1219 				__func__, ret);
1220 		return ret;
1221 	}
1222 
1223 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1224 			ndev->name, ndev);
1225 	if (ret < 0) {
1226 		netdev_err(ndev, "irq allocation for CAN failed\n");
1227 		goto err;
1228 	}
1229 
1230 	/* Set chip into reset mode */
1231 	ret = set_reset_mode(ndev);
1232 	if (ret < 0) {
1233 		netdev_err(ndev, "mode resetting failed!\n");
1234 		goto err_irq;
1235 	}
1236 
1237 	/* Common open */
1238 	ret = open_candev(ndev);
1239 	if (ret)
1240 		goto err_irq;
1241 
1242 	ret = xcan_chip_start(ndev);
1243 	if (ret < 0) {
1244 		netdev_err(ndev, "xcan_chip_start failed!\n");
1245 		goto err_candev;
1246 	}
1247 
1248 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
1249 	napi_enable(&priv->napi);
1250 	netif_start_queue(ndev);
1251 
1252 	return 0;
1253 
1254 err_candev:
1255 	close_candev(ndev);
1256 err_irq:
1257 	free_irq(ndev->irq, ndev);
1258 err:
1259 	pm_runtime_put(priv->dev);
1260 
1261 	return ret;
1262 }
1263 
1264 /**
1265  * xcan_close - Driver close routine
1266  * @ndev:	Pointer to net_device structure
1267  *
1268  * Return: 0 always
1269  */
1270 static int xcan_close(struct net_device *ndev)
1271 {
1272 	struct xcan_priv *priv = netdev_priv(ndev);
1273 
1274 	netif_stop_queue(ndev);
1275 	napi_disable(&priv->napi);
1276 	xcan_chip_stop(ndev);
1277 	free_irq(ndev->irq, ndev);
1278 	close_candev(ndev);
1279 
1280 	can_led_event(ndev, CAN_LED_EVENT_STOP);
1281 	pm_runtime_put(priv->dev);
1282 
1283 	return 0;
1284 }
1285 
1286 /**
1287  * xcan_get_berr_counter - error counter routine
1288  * @ndev:	Pointer to net_device structure
1289  * @bec:	Pointer to can_berr_counter structure
1290  *
1291  * This is the driver error counter routine.
1292  * Return: 0 on success and failure value on error
1293  */
1294 static int xcan_get_berr_counter(const struct net_device *ndev,
1295 					struct can_berr_counter *bec)
1296 {
1297 	struct xcan_priv *priv = netdev_priv(ndev);
1298 	int ret;
1299 
1300 	ret = pm_runtime_get_sync(priv->dev);
1301 	if (ret < 0) {
1302 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1303 				__func__, ret);
1304 		return ret;
1305 	}
1306 
1307 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1308 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1309 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1310 
1311 	pm_runtime_put(priv->dev);
1312 
1313 	return 0;
1314 }
1315 
1316 
1317 static const struct net_device_ops xcan_netdev_ops = {
1318 	.ndo_open	= xcan_open,
1319 	.ndo_stop	= xcan_close,
1320 	.ndo_start_xmit	= xcan_start_xmit,
1321 	.ndo_change_mtu	= can_change_mtu,
1322 };
1323 
1324 /**
1325  * xcan_suspend - Suspend method for the driver
1326  * @dev:	Address of the device structure
1327  *
1328  * Put the driver into low power mode.
1329  * Return: 0 on success and failure value on error
1330  */
1331 static int __maybe_unused xcan_suspend(struct device *dev)
1332 {
1333 	struct net_device *ndev = dev_get_drvdata(dev);
1334 
1335 	if (netif_running(ndev)) {
1336 		netif_stop_queue(ndev);
1337 		netif_device_detach(ndev);
1338 		xcan_chip_stop(ndev);
1339 	}
1340 
1341 	return pm_runtime_force_suspend(dev);
1342 }
1343 
1344 /**
1345  * xcan_resume - Resume from suspend
1346  * @dev:	Address of the device structure
1347  *
1348  * Resume operation after suspend.
1349  * Return: 0 on success and failure value on error
1350  */
1351 static int __maybe_unused xcan_resume(struct device *dev)
1352 {
1353 	struct net_device *ndev = dev_get_drvdata(dev);
1354 	int ret;
1355 
1356 	ret = pm_runtime_force_resume(dev);
1357 	if (ret) {
1358 		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1359 		return ret;
1360 	}
1361 
1362 	if (netif_running(ndev)) {
1363 		ret = xcan_chip_start(ndev);
1364 		if (ret) {
1365 			dev_err(dev, "xcan_chip_start failed on resume\n");
1366 			return ret;
1367 		}
1368 
1369 		netif_device_attach(ndev);
1370 		netif_start_queue(ndev);
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 /**
1377  * xcan_runtime_suspend - Runtime suspend method for the driver
1378  * @dev:	Address of the device structure
1379  *
1380  * Put the driver into low power mode.
1381  * Return: 0 always
1382  */
1383 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1384 {
1385 	struct net_device *ndev = dev_get_drvdata(dev);
1386 	struct xcan_priv *priv = netdev_priv(ndev);
1387 
1388 	clk_disable_unprepare(priv->bus_clk);
1389 	clk_disable_unprepare(priv->can_clk);
1390 
1391 	return 0;
1392 }
1393 
1394 /**
1395  * xcan_runtime_resume - Runtime resume from suspend
1396  * @dev:	Address of the device structure
1397  *
1398  * Resume operation after suspend.
1399  * Return: 0 on success and failure value on error
1400  */
1401 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1402 {
1403 	struct net_device *ndev = dev_get_drvdata(dev);
1404 	struct xcan_priv *priv = netdev_priv(ndev);
1405 	int ret;
1406 
1407 	ret = clk_prepare_enable(priv->bus_clk);
1408 	if (ret) {
1409 		dev_err(dev, "Cannot enable clock.\n");
1410 		return ret;
1411 	}
1412 	ret = clk_prepare_enable(priv->can_clk);
1413 	if (ret) {
1414 		dev_err(dev, "Cannot enable clock.\n");
1415 		clk_disable_unprepare(priv->bus_clk);
1416 		return ret;
1417 	}
1418 
1419 	return 0;
1420 }
1421 
1422 static const struct dev_pm_ops xcan_dev_pm_ops = {
1423 	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
1424 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1425 };
1426 
1427 static const struct xcan_devtype_data xcan_zynq_data = {
1428 	.bittiming_const = &xcan_bittiming_const,
1429 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1430 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1431 	.bus_clk_name = "pclk",
1432 };
1433 
1434 static const struct xcan_devtype_data xcan_axi_data = {
1435 	.bittiming_const = &xcan_bittiming_const,
1436 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1437 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1438 	.bus_clk_name = "s_axi_aclk",
1439 };
1440 
1441 static const struct xcan_devtype_data xcan_canfd_data = {
1442 	.flags = XCAN_FLAG_EXT_FILTERS |
1443 		 XCAN_FLAG_RXMNF |
1444 		 XCAN_FLAG_TX_MAILBOXES |
1445 		 XCAN_FLAG_RX_FIFO_MULTI,
1446 	.bittiming_const = &xcan_bittiming_const,
1447 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1448 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1449 	.bus_clk_name = "s_axi_aclk",
1450 };
1451 
1452 static const struct xcan_devtype_data xcan_canfd2_data = {
1453 	.flags = XCAN_FLAG_EXT_FILTERS |
1454 		 XCAN_FLAG_RXMNF |
1455 		 XCAN_FLAG_TX_MAILBOXES |
1456 		 XCAN_FLAG_CANFD_2 |
1457 		 XCAN_FLAG_RX_FIFO_MULTI,
1458 	.bittiming_const = &xcan_bittiming_const_canfd2,
1459 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1460 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1461 	.bus_clk_name = "s_axi_aclk",
1462 };
1463 
1464 /* Match table for OF platform binding */
1465 static const struct of_device_id xcan_of_match[] = {
1466 	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1467 	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1468 	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1469 	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
1470 	{ /* end of list */ },
1471 };
1472 MODULE_DEVICE_TABLE(of, xcan_of_match);
1473 
1474 /**
1475  * xcan_probe - Platform registration call
1476  * @pdev:	Handle to the platform device structure
1477  *
1478  * This function does all the memory allocation and registration for the CAN
1479  * device.
1480  *
1481  * Return: 0 on success and failure value on error
1482  */
1483 static int xcan_probe(struct platform_device *pdev)
1484 {
1485 	struct resource *res; /* IO mem resources */
1486 	struct net_device *ndev;
1487 	struct xcan_priv *priv;
1488 	const struct of_device_id *of_id;
1489 	const struct xcan_devtype_data *devtype = &xcan_axi_data;
1490 	void __iomem *addr;
1491 	int ret;
1492 	int rx_max, tx_max;
1493 	int hw_tx_max, hw_rx_max;
1494 	const char *hw_tx_max_property;
1495 
1496 	/* Get the virtual base address for the device */
1497 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1498 	addr = devm_ioremap_resource(&pdev->dev, res);
1499 	if (IS_ERR(addr)) {
1500 		ret = PTR_ERR(addr);
1501 		goto err;
1502 	}
1503 
1504 	of_id = of_match_device(xcan_of_match, &pdev->dev);
1505 	if (of_id && of_id->data)
1506 		devtype = of_id->data;
1507 
1508 	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1509 			     "tx-mailbox-count" : "tx-fifo-depth";
1510 
1511 	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1512 				   &hw_tx_max);
1513 	if (ret < 0) {
1514 		dev_err(&pdev->dev, "missing %s property\n",
1515 			hw_tx_max_property);
1516 		goto err;
1517 	}
1518 
1519 	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1520 				   &hw_rx_max);
1521 	if (ret < 0) {
1522 		dev_err(&pdev->dev,
1523 			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
1524 		goto err;
1525 	}
1526 
1527 	/* With TX FIFO:
1528 	 *
1529 	 * There is no way to directly figure out how many frames have been
1530 	 * sent when the TXOK interrupt is processed. If TXFEMP
1531 	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1532 	 * to determine if 1 or 2 frames have been sent.
1533 	 * Theoretically we should be able to use TXFWMEMP to determine up
1534 	 * to 3 frames, but it seems that after putting a second frame in the
1535 	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1536 	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1537 	 * sent), which is not a sensible state - possibly TXFWMEMP is not
1538 	 * completely synchronized with the rest of the bits?
1539 	 *
1540 	 * With TX mailboxes:
1541 	 *
1542 	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1543 	 * we submit frames one at a time.
1544 	 */
1545 	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1546 	    (devtype->flags & XCAN_FLAG_TXFEMP))
1547 		tx_max = min(hw_tx_max, 2);
1548 	else
1549 		tx_max = 1;
1550 
1551 	rx_max = hw_rx_max;
1552 
1553 	/* Create a CAN device instance */
1554 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1555 	if (!ndev)
1556 		return -ENOMEM;
1557 
1558 	priv = netdev_priv(ndev);
1559 	priv->dev = &pdev->dev;
1560 	priv->can.bittiming_const = devtype->bittiming_const;
1561 	priv->can.do_set_mode = xcan_do_set_mode;
1562 	priv->can.do_get_berr_counter = xcan_get_berr_counter;
1563 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1564 					CAN_CTRLMODE_BERR_REPORTING;
1565 	priv->reg_base = addr;
1566 	priv->tx_max = tx_max;
1567 	priv->devtype = *devtype;
1568 	spin_lock_init(&priv->tx_lock);
1569 
1570 	/* Get IRQ for the device */
1571 	ndev->irq = platform_get_irq(pdev, 0);
1572 	ndev->flags |= IFF_ECHO;	/* We support local echo */
1573 
1574 	platform_set_drvdata(pdev, ndev);
1575 	SET_NETDEV_DEV(ndev, &pdev->dev);
1576 	ndev->netdev_ops = &xcan_netdev_ops;
1577 
1578 	/* Getting the CAN can_clk info */
1579 	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1580 	if (IS_ERR(priv->can_clk)) {
1581 		dev_err(&pdev->dev, "Device clock not found.\n");
1582 		ret = PTR_ERR(priv->can_clk);
1583 		goto err_free;
1584 	}
1585 
1586 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1587 	if (IS_ERR(priv->bus_clk)) {
1588 		dev_err(&pdev->dev, "bus clock not found\n");
1589 		ret = PTR_ERR(priv->bus_clk);
1590 		goto err_free;
1591 	}
1592 
1593 	priv->write_reg = xcan_write_reg_le;
1594 	priv->read_reg = xcan_read_reg_le;
1595 
1596 	pm_runtime_enable(&pdev->dev);
1597 	ret = pm_runtime_get_sync(&pdev->dev);
1598 	if (ret < 0) {
1599 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1600 			__func__, ret);
1601 		goto err_pmdisable;
1602 	}
1603 
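	/* After reset only the CONFIG bit is set in the status register; if a
	 * little-endian read does not return exactly that value, the core is
	 * attached through a big-endian interconnect, so switch to the
	 * big-endian register accessors.
	 */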
1604 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1605 		priv->write_reg = xcan_write_reg_be;
1606 		priv->read_reg = xcan_read_reg_be;
1607 	}
1608 
1609 	priv->can.clock.freq = clk_get_rate(priv->can_clk);
1610 
1611 	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1612 
1613 	ret = register_candev(ndev);
1614 	if (ret) {
1615 		dev_err(&pdev->dev, "register_candev() failed (err=%d)\n", ret);
1616 		goto err_disableclks;
1617 	}
1618 
1619 	devm_can_led_init(ndev);
1620 
1621 	pm_runtime_put(&pdev->dev);
1622 
1623 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1624 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
1625 		   hw_tx_max, priv->tx_max);
1626 
1627 	return 0;
1628 
1629 err_disableclks:
1630 	pm_runtime_put(priv->dev);
1631 err_pmdisable:
1632 	pm_runtime_disable(&pdev->dev);
1633 err_free:
1634 	free_candev(ndev);
1635 err:
1636 	return ret;
1637 }
1638 
1639 /**
1640  * xcan_remove - Unregister the device after releasing the resources
1641  * @pdev:	Handle to the platform device structure
1642  *
1643  * This function frees all the resources allocated to the device.
1644  * Return: 0 always
1645  */
1646 static int xcan_remove(struct platform_device *pdev)
1647 {
1648 	struct net_device *ndev = platform_get_drvdata(pdev);
1649 	struct xcan_priv *priv = netdev_priv(ndev);
1650 
1651 	unregister_candev(ndev);
1652 	pm_runtime_disable(&pdev->dev);
1653 	netif_napi_del(&priv->napi);
1654 	free_candev(ndev);
1655 
1656 	return 0;
1657 }
1658 
1659 static struct platform_driver xcan_driver = {
1660 	.probe = xcan_probe,
1661 	.remove	= xcan_remove,
1662 	.driver	= {
1663 		.name = DRIVER_NAME,
1664 		.pm = &xcan_dev_pm_ops,
1665 		.of_match_table	= xcan_of_match,
1666 	},
1667 };
1668 
1669 module_platform_driver(xcan_driver);
1670 
1671 MODULE_LICENSE("GPL");
1672 MODULE_AUTHOR("Xilinx Inc");
1673 MODULE_DESCRIPTION("Xilinx CAN interface");
1674