xref: /openbmc/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c (revision 4ed91d48259d9ddd378424d008f2e6559f7e78f8)
/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>

#include "dpaa_eth.h"
#include "mac.h"

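/* Names of the per-CPU counters. dpaa_get_strings() emits each entry once per
 * online CPU ("<name> [CPU n]") plus an aggregate column ("<name> [TOTAL]"),
 * and copy_stats() fills the corresponding values in the same order.
 */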
static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

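/* With the tables above, DPAA_STATS_PERCPU_LEN is 7 and DPAA_STATS_GLOBAL_LEN
 * is 15; dpaa_get_sset_count() combines them with the number of online CPUs
 * and the DPAA_BPS_NUM buffer pools to size the exported statistics set.
 */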
#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_dbg(net_dev, "phy device not initialized\n");
		return 0;
	}

	err = phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return err;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

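/* No separate driver or firmware version is tracked here, so both fields are
 * reported as "0"; bus_info is taken from the name of the device two levels
 * up in the device hierarchy.
 */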
static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
		       "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return;
	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

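/* Record the requested pause settings in the mac_device, update the PHY's
 * advertised Pause/Asym_Pause bits (restarting autonegotiation if the
 * advertisement changed and aneg is enabled), then resolve and program the
 * MAC's active rx/tx pause state via fman_get_pause_cfg() and
 * fman_set_mac_active_pause().
 */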
static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	u32 newadv, oldadv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	    (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the desired
	 * rx/tx pause settings.
	 */
	newadv = 0;
	if (epause->rx_pause)
		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (epause->tx_pause)
		newadv |= ADVERTISED_Asym_Pause;

	oldadv = phydev->advertising &
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	/* If there are differences between the old and the new advertised
	 * values, restart PHY autonegotiation and advertise the new values.
	 */
	if (oldadv != newadv) {
		phydev->advertising &= ~(ADVERTISED_Pause
				| ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg) {
			err = phy_start_aneg(phydev);
			if (err < 0)
				netdev_err(net_dev, "phy_start_aneg() = %d\n",
					   err);
		}
	}

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}

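/* Number of u64 values exported through ETH_SS_STATS: one column per online
 * CPU plus a TOTAL column for every per-CPU counter and every buffer pool
 * count, followed by the global counters. For example, with 4 online CPUs
 * this is (4 + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
 * DPAA_STATS_GLOBAL_LEN entries.
 */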
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats   = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
			DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

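/* Fill in one CPU's column of the per-CPU statistics block. The block is laid
 * out row-major with (num_cpus + 1) columns per row: one row per counter in
 * dpaa_stats_percpu, then one row per buffer pool, the last column of each
 * row accumulating the total across CPUs. With two online CPUs, for instance,
 * the "interrupts" row occupies data[0..2] as [CPU 0], [CPU 1], [TOTAL].
 */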
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 *bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0, j;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	for (j = 0; j < DPAA_BPS_NUM; j++) {
		data[crr * num_values + crr_cpu] = bp_count[j];
		data[crr++ * num_values + num_cpus] += bp_count[j];
	}
}

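/* Gather all counters into the flat data[] array in the same order as the
 * strings built by dpaa_get_strings(): the per-CPU/bpool matrix filled by
 * copy_stats(), then the summed rx error and ERN (enqueue rejection
 * notification) counters, and finally the congestion group time, entry count
 * and current state.
 */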
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i, j;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv     = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		for (j = 0; j < DPAA_BPS_NUM; j++) {
			dpaa_bp = priv->dpaa_bps[j];
			if (!dpaa_bp->percpu_count)
				continue;
			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		}
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop     += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred         += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond     += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window  += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop     += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired   += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero     += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num    = 0;
	cg_status = 0;
	cg_time   = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num    = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies   = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}

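/* Build the ETH_SS_STATS string table to match the data layout above:
 * "<counter> [CPU n]" for each online CPU plus "<counter> [TOTAL]" for every
 * per-CPU counter, the same pattern for each buffer pool ("bpool a",
 * "bpool b", ...), followed by the global counter names.
 */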
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings   = data;
	num_cpus  = num_online_cpus();
	size      = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN,
				 "bpool %c [CPU %d]", 'a' + i, j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
			 'a' + i);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	memcpy(strings, dpaa_stats_global, size);
}

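/* ethtool callbacks for this driver. As a rough mapping through the ethtool
 * core: "ethtool -S" uses get_sset_count(), get_strings() and
 * get_ethtool_stats(); "ethtool -a"/"-A" use get_pauseparam()/
 * set_pauseparam(); a plain "ethtool <iface>" query goes through
 * get_link_ksettings().
 */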
const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
};