1 // SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
2 
3 /* Interrupt related logic for Mellanox Gigabit Ethernet driver
4  *
5  * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
6  */
7 
8 #include <linux/interrupt.h>
9 
10 #include "mlxbf_gige.h"
11 #include "mlxbf_gige_regs.h"
12 
13 static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
14 {
15 	struct mlxbf_gige *priv;
16 	u64 int_status;
17 
18 	priv = dev_id;
19 
20 	priv->error_intr_count++;
21 
22 	int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
23 
24 	if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
25 		priv->stats.hw_access_errors++;
26 
27 	if (int_status & MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS) {
28 		priv->stats.tx_invalid_checksums++;
29 		/* This error condition is latched into MLXBF_GIGE_INT_STATUS
30 		 * when the GigE silicon operates on the offending
31 		 * TX WQE. The write to MLXBF_GIGE_INT_STATUS at the bottom
32 		 * of this routine clears this error condition.
33 		 */
34 	}
35 
36 	if (int_status & MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE) {
37 		priv->stats.tx_small_frames++;
38 		/* This condition happens when the networking stack invokes
39 		 * this driver's "start_xmit()" method with a packet whose
40 		 * size < 60 bytes.  The GigE silicon will automatically pad
41 		 * this small frame up to a minimum-sized frame before it is
42 		 * sent. The "tx_small_frame" condition is latched into the
43 		 * MLXBF_GIGE_INT_STATUS register when the GigE silicon
44 		 * operates on the offending TX WQE. The write to
45 		 * MLXBF_GIGE_INT_STATUS at the bottom of this routine
46 		 * clears this condition.
47 		 */
48 	}
49 
50 	if (int_status & MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE)
51 		priv->stats.tx_index_errors++;
52 
53 	if (int_status & MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR)
54 		priv->stats.sw_config_errors++;
55 
56 	if (int_status & MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR)
57 		priv->stats.sw_access_errors++;
58 
59 	/* Clear all error interrupts by writing '1' back to
60 	 * all the asserted bits in INT_STATUS.  Do not write
61 	 * '1' back to 'receive packet' bit, since that is
62 	 * managed separately.
63 	 */
64 
65 	int_status &= ~MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET;
66 
67 	writeq(int_status, priv->base + MLXBF_GIGE_INT_STATUS);
68 
69 	return IRQ_HANDLED;
70 }
71 
72 static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
73 {
74 	struct mlxbf_gige *priv;
75 
76 	priv = dev_id;
77 
78 	priv->rx_intr_count++;
79 
80 	/* NOTE: GigE silicon automatically disables "packet rx" interrupt by
81 	 *       setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
82 	 *       to the ARM cores.  Software needs to re-enable "packet rx"
83 	 *       interrupts by clearing MLXBF_GIGE_INT_MASK bit0.
84 	 */
85 
86 	napi_schedule(&priv->napi);
87 
88 	return IRQ_HANDLED;
89 }
90 
91 static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
92 {
93 	struct mlxbf_gige *priv;
94 
95 	priv = dev_id;
96 	priv->llu_plu_intr_count++;
97 
98 	return IRQ_HANDLED;
99 }
100 
101 int mlxbf_gige_request_irqs(struct mlxbf_gige *priv)
102 {
103 	int err;
104 
105 	err = request_irq(priv->error_irq, mlxbf_gige_error_intr, 0,
106 			  "mlxbf_gige_error", priv);
107 	if (err) {
108 		dev_err(priv->dev, "Request error_irq failure\n");
109 		return err;
110 	}
111 
112 	err = request_irq(priv->rx_irq, mlxbf_gige_rx_intr, 0,
113 			  "mlxbf_gige_rx", priv);
114 	if (err) {
115 		dev_err(priv->dev, "Request rx_irq failure\n");
116 		goto free_error_irq;
117 	}
118 
119 	err = request_irq(priv->llu_plu_irq, mlxbf_gige_llu_plu_intr, 0,
120 			  "mlxbf_gige_llu_plu", priv);
121 	if (err) {
122 		dev_err(priv->dev, "Request llu_plu_irq failure\n");
123 		goto free_rx_irq;
124 	}
125 
126 	return 0;
127 
128 free_rx_irq:
129 	free_irq(priv->rx_irq, priv);
130 
131 free_error_irq:
132 	free_irq(priv->error_irq, priv);
133 
134 	return err;
135 }
136 
/* Release all three interrupt lines acquired by
 * mlxbf_gige_request_irqs().
 */
void mlxbf_gige_free_irqs(struct mlxbf_gige *priv)
{
	free_irq(priv->error_irq, priv);
	free_irq(priv->rx_irq, priv);
	free_irq(priv->llu_plu_irq, priv);
}
143