// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//

#include <asm/unaligned.h>

#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"

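/* Prepare a write command for a register in write_reg_buf: only the
 * bytes of @val covered by @mask are written, starting at the first
 * byte set in @mask. If the controller uses CRC protected register
 * accesses (MCP251XFD_QUIRK_CRC_REG), the CRC is appended. Returns the
 * total number of bytes to send over SPI.
 *
 * Illustrative example: mask == 0x0000ff00 selects byte 1 only, so a
 * single byte holding (val >> 8) is written to address reg + 1.
 */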
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}

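/* Set up the TX Event FIFO (TEF) ring: reset head and tail and
 * pre-build the SPI messages used at runtime, i.e. one message that
 * enables the TEF IRQs and an array of transfers that increment the
 * TEF tail pointer (UINC). If TX IRQ coalescing is configured, the
 * last UINC transfer uses an alternate buffer that leaves the
 * not-empty IRQ (TEFNEIE) disabled and enables the half-full IRQ
 * (TEFHIE) instead; the not-empty IRQ is re-enabled later by sending
 * irq_enable_msg, e.g. from mcp251xfd_tx_irq_timer().
 */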
static void
mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* TEF- and TX-FIFO have same number of objects */
	*base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);

	/* FIFO IRQ enable */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;

	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
					      addr, val, val);
	tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
	tef_ring->irq_enable_xfer.len = len;
	spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
					&tef_ring->irq_enable_xfer, 1);

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
		xfer = &tef_ring->uinc_xfer[i];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as
	 * data. Set "cs_change" of the last transfer to "0" to
	 * properly deactivate the chip select at the end of the
	 * message.
	 */
	xfer->cs_change = 0;

	if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
		val = MCP251XFD_REG_TEFCON_UINC |
			MCP251XFD_REG_TEFCON_TEFOVIE |
			MCP251XFD_REG_TEFCON_TEFHIE;

		len = mcp251xfd_cmd_prepare_write_reg(priv,
						      &tef_ring->uinc_irq_disable_buf,
						      addr, val, val);
		xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
		xfer->len = len;
	}
}

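/* Pre-build the two-transfer SPI message for a single TX object: the
 * first transfer loads the TX object into the FIFO RAM (its length is
 * assigned on the fly when a frame is queued), the second writes the
 * ring's prepared rts_buf to set TXREQ and UINC. cs_change on the
 * first transfer deasserts the chip select between the two commands.
 */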
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}

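/* Set up the TX ring: assign its base address in controller RAM and
 * its hardware FIFO number, advance *base and *fifo_nr for the next
 * user, prepare the shared request-to-send (TXREQ | UINC) register
 * write and initialize the SPI message of each TX object.
 */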
static void
mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	tx_ring->base = *base;
	tx_ring->nr = 0;
	tx_ring->fifo_nr = *fifo_nr;

	*base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
	*fifo_nr += 1;

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
}

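/* Set up all RX rings: assign base addresses and FIFO numbers and
 * pre-build the per-ring SPI messages that enable the FIFO IRQs and
 * increment the RX tail pointer (UINC), analogous to the TEF setup
 * above.
 */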
static void
mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
	struct mcp251xfd_rx_ring *rx_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->base = *base;
		rx_ring->nr = i;
		rx_ring->fifo_nr = *fifo_nr;

		*base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
		*fifo_nr += 1;

		/* FIFO IRQ enable */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_RXOVIE |
			MCP251XFD_REG_FIFOCON_TFNRFNIE;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
						      addr, val, val);
		rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
		rx_ring->irq_enable_xfer.len = len;
		spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
						&rx_ring->irq_enable_xfer, 1);

		/* FIFO increment RX tail pointer */
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Set "cs_change"
		 * of the last transfer to "0" to properly deactivate
		 * the chip select at the end of the message.
		 */
		xfer->cs_change = 0;

		/* Use 1st RX-FIFO for IRQ coalescing. If enabled
		 * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
		 * is activated), use the last transfer to disable:
		 *
		 * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
		 *
		 * and enable:
		 *
		 * - TFHRFHIE (Receive FIFO Half Full Interrupt)
		 *   - or -
		 * - TFERFFIE (Receive FIFO Full Interrupt)
		 *
		 * depending on rx_max_coalesce_frames_irq.
		 *
		 * The RXOVIE (Overflow Interrupt) is always enabled.
		 */
		if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
					 priv->rx_obj_num_coalesce_irq)) {
			val = MCP251XFD_REG_FIFOCON_UINC |
				MCP251XFD_REG_FIFOCON_RXOVIE;

			if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
				val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
			else if (priv->rx_obj_num_coalesce_irq)
				val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;

			len = mcp251xfd_cmd_prepare_write_reg(priv,
							      &rx_ring->uinc_irq_disable_buf,
							      addr, val, val);
			xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
			xfer->len = len;
		}
	}
}

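/* Lay out the controller RAM and initialize all rings: the TEF objects
 * come first, followed by the RX FIFOs and finally the TX FIFO. base
 * tracks the next free RAM address, fifo_nr the next hardware FIFO
 * number (starting at 1). Returns -ENOMEM if the configured rings need
 * more RAM than the chip provides.
 */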
int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_rx_ring *rx_ring;
	u16 base = 0, ram_used;
	u8 fifo_nr = 1;
	int i;

	netdev_reset_queue(priv->ndev);

	mcp251xfd_ring_init_tef(priv, &base);
	mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
	mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);

	/* mcp251xfd_handle_rxif() will iterate over all RX rings.
	 * Rings with their corresponding bit set in
	 * priv->regs_status.rxif are read out.
	 *
	 * If the chip is configured for only 1 RX-FIFO, and if there
	 * is an RX interrupt pending (RXIF in INT register is set),
	 * it must be the 1st RX-FIFO.
	 *
	 * We mark the RXIF of the 1st FIFO as pending here, so that
	 * we can skip the read of the RXIF register in
	 * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
	 *
	 * If we use more than 1 RX-FIFO, this value gets overwritten
	 * in mcp251xfd_read_regs_status(), so set it unconditionally
	 * here.
	 */
	priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);

	if (priv->tx_obj_num_coalesce_irq) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx_obj_num_coalesce_irq *
			   sizeof(struct mcp251xfd_hw_tef_obj));

		netdev_dbg(priv->ndev,
			   "                         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
			   priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
			   sizeof(struct mcp251xfd_hw_tef_obj));
	} else {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
	}

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
				   priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);

			if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
				continue;

			netdev_dbg(priv->ndev,
				   "                         0x%03x: %2u*%u bytes = %4u bytes\n",
				   mcp251xfd_get_rx_obj_addr(rx_ring,
							     priv->rx_obj_num_coalesce_irq),
				   rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
				   rx_ring->obj_size,
				   (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
				   rx_ring->obj_size);
		} else {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   rx_ring->obj_num, rx_ring->obj_size,
				   rx_ring->obj_num * rx_ring->obj_size);
		}
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: TX:   FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
		   priv->tx->fifo_nr,
		   mcp251xfd_get_tx_obj_addr(priv->tx, 0),
		   priv->tx->obj_num, priv->tx->obj_size,
		   priv->tx->obj_num * priv->tx->obj_size);

	netdev_dbg(priv->ndev,
		   "FIFO setup: free:                             %4d bytes\n",
		   MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));

	ram_used = base - MCP251XFD_RAM_START;
	if (ram_used > MCP251XFD_RAM_SIZE) {
		netdev_err(priv->ndev,
			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
			   ram_used, MCP251XFD_RAM_SIZE);
		return -ENOMEM;
	}

	return 0;
}

void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
	int i;

	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
		kfree(priv->rx[i]);
		priv->rx[i] = NULL;
	}
}

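/* IRQ coalescing timers: after the FIFO IRQs have been disabled by the
 * last UINC transfer (see the uinc_irq_disable buffers above), these
 * hrtimer callbacks re-enable them by asynchronously sending the
 * pre-built irq_enable message, unless the interface is going down.
 */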
static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
{
	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
						   rx_irq_timer);
	struct mcp251xfd_rx_ring *ring = priv->rx[0];

	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
		return HRTIMER_NORESTART;

	spi_async(priv->spi, &ring->irq_enable_msg);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
{
	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
						   tx_irq_timer);
	struct mcp251xfd_tef_ring *ring = priv->tef;

	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
		return HRTIMER_NORESTART;

	spi_async(priv->spi, &ring->irq_enable_msg);

	return HRTIMER_NORESTART;
}

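/* RAM layout constraints passed to can_ram_get_layout(). Note that the
 * TX object sizes include a TEF object, as the TEF- and TX-FIFO hold
 * the same number of objects.
 */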
const struct can_ram_config mcp251xfd_ram_config = {
	.rx = {
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
		.min = MCP251XFD_RX_OBJ_NUM_MIN,
		.max = MCP251XFD_RX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
		.def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
		.fifo_num = MCP251XFD_FIFO_RX_NUM,
		.fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
	},
	.tx = {
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_canfd),
		.min = MCP251XFD_TX_OBJ_NUM_MIN,
		.max = MCP251XFD_TX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
		.def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
		.fifo_num = MCP251XFD_FIFO_TX_NUM,
		.fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
	},
	.size = MCP251XFD_RAM_SIZE,
	.fifo_depth = MCP251XFD_FIFO_DEPTH,
};

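/* Allocate the RX rings and size all rings for the current mode
 * (CAN-2.0 vs. CAN FD object sizes). The requested number of RX
 * objects is distributed over the available FIFOs: if RX frame
 * coalescing is enabled the first FIFO holds twice the coalescing
 * threshold (capped at the FIFO depth), the remaining objects are
 * spread over power-of-two sized FIFOs. The RX/TX IRQ coalescing
 * hrtimers are initialized here as well.
 */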
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	const bool fd_mode = mcp251xfd_is_fd_mode(priv);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_rx_ring *rx_ring;
	u8 tx_obj_size, rx_obj_size;
	u8 rem, i;

	/* switching from CAN-2.0 to CAN-FD mode or vice versa */
	if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
		struct can_ram_layout layout;

		can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
		priv->rx_obj_num = layout.default_rx;
		tx_ring->obj_num = layout.default_tx;
	}

	if (fd_mode) {
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
		set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
	} else {
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
		clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
	}

	tx_ring->obj_size = tx_obj_size;

	rem = priv->rx_obj_num;
	for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
		u8 rx_obj_num;

		if (i == 0 && priv->rx_obj_num_coalesce_irq)
			rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
					   MCP251XFD_FIFO_DEPTH);
		else
			rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
					   MCP251XFD_FIFO_DEPTH);
		rem -= rx_obj_num;

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}

		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;
	}
	priv->rx_ring_num = i;

	hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;

	hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;

	return 0;
}