// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
 *
 * Copyright (C) 2016  PEAK System-Technik GmbH
 */

#include <linux/can.h>
#include <linux/can/dev.h>

#include "peak_canfd_user.h"

/* internal IP core cache size (used as default echo skbs max number) */
#define PCANFD_ECHO_SKB_MAX		24

/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
static const struct can_bittiming_const peak_canfd_nominal_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
	.brp_inc = 1,
};

static const struct can_bittiming_const peak_canfd_data_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
	.brp_inc = 1,
};

static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
{
	priv->cmd_len = 0;
	return priv;
}

static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
{
	struct pucan_command *cmd;

	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
		return NULL;

	cmd = priv->cmd_buffer + priv->cmd_len;

	/* reset all unused bits to their default value */
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
	priv->cmd_len += sizeof(*cmd);

	return cmd;
}

static int pucan_write_cmd(struct peak_canfd_priv *priv)
{
	int err;

	if (priv->pre_cmd) {
		err = priv->pre_cmd(priv);
		if (err)
			return err;
	}

	err = priv->write_cmd(priv);
	if (err)
		return err;

	if (priv->post_cmd)
		err = priv->post_cmd(priv);

	return err;
}

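/* Command sequences follow a simple pattern: pucan_init_cmd() resets the
 * write index into the glue driver's priv->cmd_buffer, pucan_add_cmd()
 * appends one fixed-size record (returning NULL once priv->cmd_maxlen is
 * reached), and pucan_write_cmd() pushes the buffer to the IP core through
 * the bus-specific pre_cmd/write_cmd/post_cmd hooks. The wrappers below all
 * follow this illustrative sketch (PUCAN_CMD_XXX and the field name are
 * placeholders, not real definitions):
 *
 *	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_XXX);
 *	cmd->some_field = cpu_to_le16(value);
 *	return pucan_write_cmd(priv);
 *
 * The single-command wrappers rely on the glue driver providing a command
 * buffer large enough for at least one record.
 */
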
/* uCAN commands interface functions */
static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
	return pucan_write_cmd(priv);
}

static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}

static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}

static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
				 const struct can_bittiming *pbt)
{
	struct pucan_timing_slow *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);

	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
				       priv->can.ctrlmode &
				       CAN_CTRLMODE_3_SAMPLES);
	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));

	cmd->ewl = 96;	/* default */

	netdev_dbg(priv->ndev,
		   "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);

	return pucan_write_cmd(priv);
}
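
/* Worked example for the nominal bittiming above (the 80 MHz CAN clock is
 * only an illustrative assumption; the real value is whatever the glue
 * driver reports in priv->can.clock.freq): with brp=2, prop_seg +
 * phase_seg1 = 63, phase_seg2 = 16 and sjw = 16, one bit lasts
 * 1 + 63 + 16 = 80 time quanta of 25 ns, i.e. 500 kbit/s with an 80%
 * sample point, and the fields written above hold the "minus one" values
 * 1, 62, 15 and 15.
 */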

static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
				 const struct can_bittiming *pbt)
{
	struct pucan_timing_fast *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);

	cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
	cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
	cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));

	netdev_dbg(priv->ndev,
		   "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);

	return pucan_write_cmd(priv);
}

static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
{
	struct pucan_std_filter *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);

	/* each 11-bit CAN ID value is represented by one bit in a 64-row
	 * array of 32-bit words: the upper 6 bits of the CAN ID select the
	 * row while the lowest 5 bits select the bit in that row.
	 *
	 * bit	filter
	 * 1	passed
	 * 0	discarded
	 */

	/* select the row */
	cmd->idx = row;

	/* set/unset bits in the row */
	cmd->mask = cpu_to_le32(mask);

	return pucan_write_cmd(priv);
}
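
/* For example, standard CAN ID 0x123 maps to row 0x123 >> 5 = 9 and
 * bit 0x123 & 0x1f = 3: setting bit 3 in the mask written for row 9 lets
 * frames carrying that ID pass the filter.
 */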

static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
{
	struct pucan_tx_abort *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);

	cmd->flags = cpu_to_le16(flags);

	return pucan_write_cmd(priv);
}

static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
{
	struct pucan_wr_err_cnt *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);

	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
	cmd->tx_counter = 0;
	cmd->rx_counter = 0;

	return pucan_write_cmd(priv);
}

static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
{
	struct pucan_options *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);

	cmd->options = cpu_to_le16(opt_mask);

	return pucan_write_cmd(priv);
}

static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
{
	struct pucan_options *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);

	cmd->options = cpu_to_le16(opt_mask);

	return pucan_write_cmd(priv);
}

static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);

	return pucan_write_cmd(priv);
}

static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	u64 ts_us;

	ts_us = (u64)le32_to_cpu(ts_high) << 32;
	ts_us |= le32_to_cpu(ts_low);

	/* IP core timestamps are expressed in µs */
	hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);

	return netif_rx(skb);
}
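
/* The two 32-bit halves above form a 64-bit microsecond counter, so e.g. a
 * raw value of 1000000 becomes a 1 s hardware timestamp after the
 * NSEC_PER_USEC conversion.
 */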

/* handle the reception of one CAN frame */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
			       struct pucan_rx_msg *msg)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
	u8 cf_len;

	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
		cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg));
	else
		cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg));

	/* if this frame is an echo of a frame we sent ourselves, */
	if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
		unsigned long flags;

		spin_lock_irqsave(&priv->echo_lock, flags);
		can_get_echo_skb(priv->ndev, msg->client, NULL);

		/* count bytes of the echo instead of skb */
		stats->tx_bytes += cf_len;
		stats->tx_packets++;

		/* restart tx queue (a slot is free) */
		netif_wake_queue(priv->ndev);

		spin_unlock_irqrestore(&priv->echo_lock, flags);

		/* if this frame is only an echo, stop here. Otherwise,
		 * continue to push this application self-received frame into
		 * its own rx queue.
		 */
		if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
			return 0;
	}

	/* otherwise, push it into the rx fifo */
	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
		/* CAN FD frame case */
		skb = alloc_canfd_skb(priv->ndev, &cf);
		if (!skb)
			return -ENOMEM;

		if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
			cf->flags |= CANFD_BRS;

		if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
			cf->flags |= CANFD_ESI;
	} else {
		/* CAN 2.0 frame case */
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
		if (!skb)
			return -ENOMEM;
	}

	cf->can_id = le32_to_cpu(msg->can_id);
	cf->len = cf_len;

	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
		cf->can_id |= CAN_EFF_FLAG;

	if (rx_msg_flags & PUCAN_MSG_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, msg->d, cf->len);

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}

/* handle rx/tx error counters notification */
static int pucan_handle_error(struct peak_canfd_priv *priv,
			      struct pucan_error_msg *msg)
{
	priv->bec.txerr = msg->tx_err_cnt;
	priv->bec.rxerr = msg->rx_err_cnt;

	return 0;
}

/* handle status notification */
static int pucan_handle_status(struct peak_canfd_priv *priv,
			       struct pucan_status_msg *msg)
{
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be set up */
	if (pucan_status_is_rx_barrier(msg)) {
		if (priv->enable_tx_path) {
			int err = priv->enable_tx_path(priv);

			if (err)
				return err;
		}

		/* start network queue (echo_skb array is empty) */
		netif_start_queue(ndev);

		return 0;
	}

	skb = alloc_can_err_skb(ndev, &cf);

	/* test state error bits according to their priority */
	if (pucan_status_is_busoff(msg)) {
		netdev_dbg(ndev, "Bus-off entry status\n");
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;

	} else if (pucan_status_is_passive(msg)) {
		netdev_dbg(ndev, "Error passive status\n");
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		priv->can.can_stats.error_passive++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_PASSIVE :
					CAN_ERR_CRTL_RX_PASSIVE;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (pucan_status_is_warning(msg)) {
		netdev_dbg(ndev, "Error warning status\n");
		priv->can.state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
		/* back to ERROR_ACTIVE */
		netdev_dbg(ndev, "Error active status\n");
		can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
				 CAN_STATE_ERROR_ACTIVE);
	} else {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->len;
	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}

/* handle uCAN Rx overflow notification */
static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	cf->data[6] = priv->bec.txerr;
	cf->data[7] = priv->bec.rxerr;

	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_rx(skb);

	return 0;
}

/* handle a single uCAN message */
int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
			  struct pucan_rx_msg *msg)
{
	u16 msg_type = le16_to_cpu(msg->type);
	int msg_size = le16_to_cpu(msg->size);
	int err;

	if (!msg_size || !msg_type) {
		/* null packet found: end of list */
		goto exit;
	}

	switch (msg_type) {
	case PUCAN_MSG_CAN_RX:
		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
		break;
	case PUCAN_MSG_ERROR:
		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
		break;
	case PUCAN_MSG_STATUS:
		err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
		break;
	case PUCAN_MSG_CACHE_CRITICAL:
		err = pucan_handle_cache_critical(priv);
		break;
	default:
		err = 0;
	}

	if (err < 0)
		return err;

exit:
	return msg_size;
}

/* handle a list of rx_count messages from rx_msg memory address */
int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
				struct pucan_rx_msg *msg_list, int msg_count)
{
	void *msg_ptr = msg_list;
	int i, msg_size = 0;

	for (i = 0; i < msg_count; i++) {
		msg_size = peak_canfd_handle_msg(priv, msg_ptr);

		/* a null packet can be found at the end of a list */
		if (msg_size <= 0)
			break;

		msg_ptr += ALIGN(msg_size, 4);
	}

	if (msg_size < 0)
		return msg_size;

	return i;
}
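
/* Note for callers (typically the bus glue's Rx/interrupt path, which is an
 * assumption about the glue code rather than something enforced here):
 * records in the list are 32-bit aligned, iteration stops early on a null
 * end-of-list record, and the return value is either the number of messages
 * handled or the negative error code returned by peak_canfd_handle_msg().
 */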

static int peak_canfd_start(struct peak_canfd_priv *priv)
{
	int err;

	err = pucan_clr_err_counters(priv);
	if (err)
		goto err_exit;

	priv->echo_idx = 0;

	priv->bec.txerr = 0;
	priv->bec.rxerr = 0;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		err = pucan_set_listen_only_mode(priv);
	else
		err = pucan_set_normal_mode(priv);

err_exit:
	return err;
}

static void peak_canfd_stop(struct peak_canfd_priv *priv)
{
	int err;

	/* go back to RESET mode */
	err = pucan_set_reset_mode(priv);
	if (err) {
		netdev_err(priv->ndev, "channel %u reset failed\n",
			   priv->index);
	} else {
		/* abort last Tx (MUST be done in RESET mode only!) */
		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
	}
}

static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	switch (mode) {
	case CAN_MODE_START:
		peak_canfd_start(priv);
		netif_wake_queue(ndev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int peak_canfd_get_berr_counter(const struct net_device *ndev,
				       struct can_berr_counter *bec)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	*bec = priv->bec;
	return 0;
}

static int peak_canfd_open(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	int i, err = 0;

	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed, error %d\n", err);
		goto err_exit;
	}

	err = pucan_set_reset_mode(priv);
	if (err)
		goto err_close;

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
		else
			err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);

		if (err)
			goto err_close;
	}

	/* set option: get rx/tx error counters */
	err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
	if (err)
		goto err_close;

	/* accept all standard CAN IDs */
	for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
		pucan_set_std_filter(priv, i, 0xffffffff);

	err = peak_canfd_start(priv);
	if (err)
		goto err_close;

	/* receiving the Rx barrier status tells us when the Tx path is ready */
	err = pucan_setup_rx_barrier(priv);
	if (!err)
		goto err_exit;

err_close:
	close_candev(ndev);
err_exit:
	return err;
}

static int peak_canfd_set_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_slow(priv, &priv->can.bittiming);
}

static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
}

static int peak_canfd_close(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	peak_canfd_stop(priv);
	close_candev(ndev);

	return 0;
}

static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	struct pucan_tx_msg *msg;
	u16 msg_size, msg_flags;
	unsigned long flags;
	bool should_stop_tx_queue;
	int room_left;
	u8 len;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);

	/* this should never happen except under a bus-off condition and
	 * during the (auto-)restart mechanism
	 */
	if (!msg) {
		stats->tx_dropped++;
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	msg->size = cpu_to_le16(msg_size);
	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
	msg_flags = 0;

	if (cf->can_id & CAN_EFF_FLAG) {
		msg_flags |= PUCAN_MSG_EXT_ID;
		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
	} else {
		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
	}

	if (can_is_canfd_skb(skb)) {
		/* CAN FD frame format */
		len = can_fd_len2dlc(cf->len);

		msg_flags |= PUCAN_MSG_EXT_DATA_LEN;

		if (cf->flags & CANFD_BRS)
			msg_flags |= PUCAN_MSG_BITRATE_SWITCH;

		if (cf->flags & CANFD_ESI)
			msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
	} else {
		/* CAN 2.0 frame format */
		len = cf->len;

		if (cf->can_id & CAN_RTR_FLAG)
			msg_flags |= PUCAN_MSG_RTR;
	}

	/* always ask loopback for echo management */
	msg_flags |= PUCAN_MSG_LOOPED_BACK;

	/* set driver-specific bit to differentiate from application loopback */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		msg_flags |= PUCAN_MSG_SELF_RECEIVE;

	msg->flags = cpu_to_le16(msg_flags);
	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len);
	memcpy(msg->d, cf->data, cf->len);

	/* the msg client field is used as an index into the echo skb ring */
	msg->client = priv->echo_idx;

	spin_lock_irqsave(&priv->echo_lock, flags);

	/* prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, ndev, priv->echo_idx, 0);

	/* move echo index to the next slot */
	priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;

	/* if the next slot is not free, stop the network queue: no free slot
	 * in the echo skb ring means that the controller has not yet written
	 * those frames onto the bus, so there is no need to queue more.
	 */
	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);

	/* also stop the tx queue if there is not enough room for one more msg */
	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CANFD_MAX_DLEN));
	else
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CAN_MAX_DLEN));

	if (should_stop_tx_queue)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->echo_lock, flags);

	/* write the skb on the interface */
	priv->write_tx_msg(priv, msg);

	return NETDEV_TX_OK;
}
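
/* Tx flow control summary: each frame queued above takes one slot in the
 * echo skb ring (indexed by msg->client). The queue is stopped here as soon
 * as the next slot is still busy or the Tx area may not hold one more
 * message, and it is woken up again from pucan_handle_can_rx() when the
 * LOOPED_BACK echo of a previously sent frame comes back from the IP core.
 */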

static const struct net_device_ops peak_canfd_netdev_ops = {
	.ndo_open = peak_canfd_open,
	.ndo_stop = peak_canfd_close,
	.ndo_start_xmit = peak_canfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
					int echo_skb_max)
{
	struct net_device *ndev;
	struct peak_canfd_priv *priv;

	/* we DO support local echo */
	if (echo_skb_max < 0)
		echo_skb_max = PCANFD_ECHO_SKB_MAX;

	/* allocate the candev object */
	ndev = alloc_candev(sizeof_priv, echo_skb_max);
	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);

	/* now complete the SocketCAN side of the initialization */
	priv->can.state = CAN_STATE_STOPPED;
	priv->can.bittiming_const = &peak_canfd_nominal_const;
	priv->can.data_bittiming_const = &peak_canfd_data_const;

	priv->can.do_set_mode = peak_canfd_set_mode;
	priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
	priv->can.do_set_bittiming = peak_canfd_set_bittiming;
	priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_3_SAMPLES |
				       CAN_CTRLMODE_FD |
				       CAN_CTRLMODE_FD_NON_ISO |
				       CAN_CTRLMODE_BERR_REPORTING;

	priv->ndev = ndev;
	priv->index = index;
	priv->cmd_len = 0;
	spin_lock_init(&priv->echo_lock);

	ndev->flags |= IFF_ECHO;
	ndev->netdev_ops = &peak_canfd_netdev_ops;
	ndev->dev_id = index;

	return ndev;
}
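
/* Minimal usage sketch for a bus glue driver (illustrative only: the
 * "my_glue" names are hypothetical, and the glue-private structure must
 * embed struct peak_canfd_priv as its first member so that netdev_priv()
 * works for both views):
 *
 *	ndev = alloc_peak_canfd_dev(sizeof(struct my_glue_priv), ch_idx,
 *				    PCANFD_ECHO_SKB_MAX);
 *	priv = netdev_priv(ndev);
 *	priv->cmd_buffer = ...;			and priv->cmd_maxlen
 *	priv->write_cmd = my_glue_write_cmd;	pre_cmd/post_cmd are optional
 *	priv->alloc_tx_msg = my_glue_alloc_tx_msg;
 *	priv->write_tx_msg = my_glue_write_tx_msg;
 *	priv->enable_tx_path = my_glue_enable_tx_path;	optional
 *	priv->can.clock.freq = ...;		CAN clock of the IP core in Hz
 *	err = register_candev(ndev);
 */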