// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.25)
 *  - PEAK linux canfd driver
 *  - Altera Avalon EPCS flash controller driver
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
#define KVASER_PCIEFD_DMA_COUNT 2

#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)

#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4)

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5

/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)

#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16

#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller has one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller has CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
	KVASER_PCIEFD_KCAN_STAT_IRM)

/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)

#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26

#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16

/* Kvaser KCAN packet types */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0
#define KVASER_PCIEFD_PACK_TYPE_ACK 1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9

/* Kvaser KCAN packet common definitions */
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28

/* Kvaser KCAN TDATA and RDATA first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)

/* Kvaser KCAN APACKET */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11)

/* Kvaser KCAN SPACK first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)

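/* Every packet read back from the Rx DMA buffers starts with a two word
 * header. header[1] carries the packet type in bits 31:28
 * (KVASER_PCIEFD_PACKET_TYPE_SHIFT) and the channel id in bits 27:25
 * (KVASER_PCIEFD_PACKET_CHID_SHIFT). The sequence number in bits 7:0
 * (KVASER_PCIEFD_PACKET_SEQ_MSK) sits in header[1] for status packets and
 * in header[0] for ack packets, as used by the handlers further down.
 */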
struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 255,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 4096,
	.brp_inc = 1,
};

struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};
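
/* The config image occupies the 64 KiB flash block at
 * KVASER_PCIEFD_CFG_IMG_OFFSET. Its stored CRC is compared against the
 * complemented big-endian CRC-32 of the params array only; version and
 * magic are checked separately. The table is indexed by parameter number,
 * e.g. params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN] holds the channel count.
 */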

static struct pci_device_id kvaser_pciefd_id_table[] = {
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
	{ 0,},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

/* Onboard flash memory functions */
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 res;
	int ret;

	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				 res, res & msk, 0, 10);

	return ret;
}

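/* Full-duplex transfer against the EPCS flash controller. Every word
 * written to the TX register is matched by a read from the RX register:
 * dummy reads drain the RX side while the command is sent, and dummy zero
 * writes clock out the flash response while it is received. The final
 * (c != -1) test only guards against a loop somehow exiting early without
 * returning, which does not occur with the loops as written.
 */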
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}

static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}

static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;

	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}

static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* Read electronic signature */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, so the WIP bit should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}

static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd;

	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	trigger = (100 * top + 50) / 100;
	if (trigger < 0)
		trigger = 0;

	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

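/* A short sketch of the PWM arithmetic, assuming for illustration an
 * 80 MHz KCAN clock (the real frequency is read from the SYSID block at
 * probe time): kvaser_pciefd_pwm_start() below programs
 * top = 80000000 / (2 * 500000) - 1 = 79 for a 500 kHz period and
 * trigger = (100 * 79 - 95 * 80 + 50) / 100 = 3 for roughly 95% duty
 * cycle, while kvaser_pciefd_pwm_stop() above parks the output by setting
 * trigger equal to top. What the PWM line drives on the board is not
 * visible from this driver.
 */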
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set the PWM frequency to 500 kHz */
	top = can->can.clock.freq / (2 * 500000) - 1;

	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95% */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err)
		return err;

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	close_candev(netdev);

	return ret;
}

static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

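/* Tx FIFO write protocol, as implemented below: the two header words and
 * all payload words except the last are written to KCAN_FIFO_REG, and the
 * final word goes to KCAN_FIFO_LAST_REG, which commits the packet to the
 * FIFO. A packet without payload is committed by writing a zero word to
 * the LAST register. The sequence number placed in the header is the echo
 * index, so the later ack packet can be matched to its echo skb.
 */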
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

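/* The nominal (BTRN) and data (BTRD) bit timing registers share one
 * layout: brp - 1 in bits 12:0, sjw - 1 in bits 16:13, tseg1 - 1
 * (prop_seg + phase_seg1) in bits 25:17 and tseg2 - 1 (phase_seg2) in
 * bits 30:26. The controller only accepts bit timing writes while in
 * reset mode, hence the poll for the RM bit below.
 */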
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;
	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected on channel %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

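/* Each Rx DMA buffer has an 8 byte descriptor in the DMA map, at
 * KVASER_PCIEFD_DMA_MAP_BASE + 8 * i for buffer i: first the low address
 * word, with bit 0 (KVASER_PCIEFD_64BIT_DMA_BIT) flagging 64-bit
 * addressing, then the high address word.
 */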
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	iowrite32(word1, pcie->reg_base + offset);
	iowrite32(word2, pcie->reg_base + offset + 4);
}

static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] =
			dmam_alloc_coherent(&pcie->pci->dev,
					    KVASER_PCIEFD_DMA_SIZE,
					    &dma_addr[i],
					    GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret;

	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
	return ret;
}

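/* Rx packets carry a 64-bit timestamp counted in KCAN clock ticks. With
 * freq_to_ticks_div = freq / 1000000, i.e. ticks per microsecond, the
 * handlers below convert it to nanoseconds for the skb hardware timestamp
 * as ticks * 1000 / freq_to_ticks_div.
 */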
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	struct net_device_stats *stats;
	struct skb_shared_hwtstamps *shhwtstamps;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	stats = &priv->dev->stats;

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}

		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;

		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			stats->rx_dropped++;
			return -ENOMEM;
		}
	}

	cf->can_id = p->header[0] & CAN_EFF_MASK;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, data, cf->len);

	shhwtstamps = skb_hwtstamps(skb);

	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    pcie->freq_to_ticks_div));

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	return netif_rx(skb);
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);

		/* Prevent the CAN controller from auto-recovering from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}
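
/* The counter thresholds above (96/128/255) follow the classic CAN fault
 * confinement limits. Passing a zero (error active) tx or rx state for
 * the side with the lower counter lets can_change_state() attribute the
 * state change to the counter that actually caused it.
 */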

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp =
		ns_to_ktime(div_u64(p->timestamp * 1000,
				    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	netif_rx(skb);
	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}

static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Transition from reset mode to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		struct net_device_stats *stats = &can->can.dev->stats;

		stats->tx_bytes += dlc;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}

static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);

	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		stats->rx_bytes += cf->can_dlc;
		stats->rx_packets++;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

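/* Layout of one record in an Rx DMA buffer, as parsed below (little-endian
 * 32-bit words): a size word counting the total number of words in the
 * record including itself, two header words, a 64-bit timestamp and, for
 * data packets that are not RTR frames, the payload rounded up to whole
 * words. A zero size word marks the end of valid data and rewinds the
 * read position to the start of the buffer.
 */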
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}

static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming, not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check if the board IRQ flags an interrupt for channel i */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}

static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;
	struct kvaser_pciefd_can *can;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be disabled at this point */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd)
1913