1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #ifdef CONFIG_DEBUG_FS
5
6 #include <linux/fs.h>
7 #include <linux/debugfs.h>
8 #include <linux/if_bridge.h>
9 #include "i40e.h"
10 #include "i40e_virtchnl_pf.h"
11
12 static struct dentry *i40e_dbg_root;
13
14 enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18 };
19
20 /**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
24 **/
25 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26 {
27 int i;
28
29 if (seid < 0)
30 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
31 else
32 for (i = 0; i < pf->num_alloc_vsi; i++)
33 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
34 return pf->vsi[i];
35
36 return NULL;
37 }
38
39 /**
40 * i40e_dbg_find_veb - searches for the veb with the given seid
41 * @pf: the PF structure to search for the veb
42 * @seid: seid of the veb it is searching for
43 **/
44 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
45 {
46 int i;
47
48 for (i = 0; i < I40E_MAX_VEB; i++)
49 if (pf->veb[i] && pf->veb[i]->seid == seid)
50 return pf->veb[i];
51 return NULL;
52 }
53
54 /**************************************************************
55 * command
56 * The command entry in debugfs is for giving the driver commands
57 * to be executed - these may be for changing the internal switch
58 * setup, adding or removing filters, or other things. Many of
59 * these will be useful for some forms of unit testing.
60 **************************************************************/
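/*
 * Example usage from userspace (a sketch, not taken from this file: the
 * debugfs mount point, per-PF directory name and the seid value are
 * assumptions):
 *
 *   # echo "dump vsi 390" > /sys/kernel/debug/i40e/<pci-bus-id>/command
 *   # echo "dump reset stats" > /sys/kernel/debug/i40e/<pci-bus-id>/command
 *   # dmesg | tail
 *
 * Results are reported through dev_info(), so they land in the kernel log
 * rather than being returned through the debugfs file itself.
 */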
61 static char i40e_dbg_command_buf[256] = "";
62
63 /**
64 * i40e_dbg_command_read - read for command datum
65 * @filp: the opened file
66 * @buffer: where to write the data for the user to read
67 * @count: the size of the user's buffer
68 * @ppos: file position offset
69 **/
70 static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
71 size_t count, loff_t *ppos)
72 {
73 struct i40e_pf *pf = filp->private_data;
74 int bytes_not_copied;
75 int buf_size = 256;
76 char *buf;
77 int len;
78
79 /* don't allow partial reads */
80 if (*ppos != 0)
81 return 0;
82 if (count < buf_size)
83 return -ENOSPC;
84
85 buf = kzalloc(buf_size, GFP_KERNEL);
86 if (!buf)
87 return -ENOSPC;
88
89 len = snprintf(buf, buf_size, "%s: %s\n",
90 pf->vsi[pf->lan_vsi]->netdev->name,
91 i40e_dbg_command_buf);
92
93 bytes_not_copied = copy_to_user(buffer, buf, len);
94 kfree(buf);
95
96 if (bytes_not_copied)
97 return -EFAULT;
98
99 *ppos = len;
100 return len;
101 }
102
103 static char *i40e_filter_state_string[] = {
104 "INVALID",
105 "NEW",
106 "ACTIVE",
107 "FAILED",
108 "REMOVE",
109 "NEW_SYNC",
110 };
111
112 /**
113 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
114 * @pf: the i40e_pf created in command write
115 * @seid: the seid the user put in
116 **/
117 static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
118 {
119 struct rtnl_link_stats64 *nstat;
120 struct i40e_mac_filter *f;
121 struct i40e_vsi *vsi;
122 int i, bkt;
123
124 vsi = i40e_dbg_find_vsi(pf, seid);
125 if (!vsi) {
126 dev_info(&pf->pdev->dev,
127 "dump %d: seid not found\n", seid);
128 return;
129 }
130 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
131 if (vsi->netdev) {
132 struct net_device *nd = vsi->netdev;
133
134 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
135 nd->name, nd->state, nd->flags);
136 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
137 (unsigned long int)nd->features);
138 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
139 (unsigned long int)nd->hw_features);
140 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
141 (unsigned long int)nd->vlan_features);
142 }
143 dev_info(&pf->pdev->dev,
144 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
145 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
146 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
147 dev_info(&pf->pdev->dev,
148 " state[%d] = %08lx\n",
149 i, vsi->state[i]);
150 if (vsi == pf->vsi[pf->lan_vsi])
151 dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
152 pf->hw.mac.addr,
153 pf->hw.mac.san_addr,
154 pf->hw.mac.port_addr);
155 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
156 dev_info(&pf->pdev->dev,
157 " mac_filter_hash: %pM vid=%d, state %s\n",
158 f->macaddr, f->vlan,
159 i40e_filter_state_string[f->state]);
160 }
161 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
162 vsi->active_filters, vsi->promisc_threshold,
163 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
164 "ON" : "OFF"));
165 nstat = i40e_get_vsi_stats_struct(vsi);
166 dev_info(&pf->pdev->dev,
167 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
168 (unsigned long int)nstat->rx_packets,
169 (unsigned long int)nstat->rx_bytes,
170 (unsigned long int)nstat->rx_errors,
171 (unsigned long int)nstat->rx_dropped);
172 dev_info(&pf->pdev->dev,
173 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
174 (unsigned long int)nstat->tx_packets,
175 (unsigned long int)nstat->tx_bytes,
176 (unsigned long int)nstat->tx_errors,
177 (unsigned long int)nstat->tx_dropped);
178 dev_info(&pf->pdev->dev,
179 " net_stats: multicast = %lu, collisions = %lu\n",
180 (unsigned long int)nstat->multicast,
181 (unsigned long int)nstat->collisions);
182 dev_info(&pf->pdev->dev,
183 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
184 (unsigned long int)nstat->rx_length_errors,
185 (unsigned long int)nstat->rx_over_errors,
186 (unsigned long int)nstat->rx_crc_errors);
187 dev_info(&pf->pdev->dev,
188 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
189 (unsigned long int)nstat->rx_frame_errors,
190 (unsigned long int)nstat->rx_fifo_errors,
191 (unsigned long int)nstat->rx_missed_errors);
192 dev_info(&pf->pdev->dev,
193 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
194 (unsigned long int)nstat->tx_aborted_errors,
195 (unsigned long int)nstat->tx_carrier_errors,
196 (unsigned long int)nstat->tx_fifo_errors);
197 dev_info(&pf->pdev->dev,
198 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
199 (unsigned long int)nstat->tx_heartbeat_errors,
200 (unsigned long int)nstat->tx_window_errors);
201 dev_info(&pf->pdev->dev,
202 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
203 (unsigned long int)nstat->rx_compressed,
204 (unsigned long int)nstat->tx_compressed);
205 dev_info(&pf->pdev->dev,
206 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
207 (unsigned long int)vsi->net_stats_offsets.rx_packets,
208 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
209 (unsigned long int)vsi->net_stats_offsets.rx_errors,
210 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
211 dev_info(&pf->pdev->dev,
212 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
213 (unsigned long int)vsi->net_stats_offsets.tx_packets,
214 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
215 (unsigned long int)vsi->net_stats_offsets.tx_errors,
216 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
217 dev_info(&pf->pdev->dev,
218 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
219 (unsigned long int)vsi->net_stats_offsets.multicast,
220 (unsigned long int)vsi->net_stats_offsets.collisions);
221 dev_info(&pf->pdev->dev,
222 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
223 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
224 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
225 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
226 dev_info(&pf->pdev->dev,
227 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
228 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
229 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
230 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
231 dev_info(&pf->pdev->dev,
232 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
233 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
234 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
235 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
236 dev_info(&pf->pdev->dev,
237 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
238 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
239 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
240 dev_info(&pf->pdev->dev,
241 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
242 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
243 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
244 dev_info(&pf->pdev->dev,
245 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
246 vsi->tx_restart, vsi->tx_busy,
247 vsi->rx_buf_failed, vsi->rx_page_failed);
248 rcu_read_lock();
249 for (i = 0; i < vsi->num_queue_pairs; i++) {
250 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
251
252 if (!rx_ring)
253 continue;
254
255 dev_info(&pf->pdev->dev,
256 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
257 i, *rx_ring->state,
258 rx_ring->queue_index,
259 rx_ring->reg_idx);
260 dev_info(&pf->pdev->dev,
261 " rx_rings[%i]: rx_buf_len = %d\n",
262 i, rx_ring->rx_buf_len);
263 dev_info(&pf->pdev->dev,
264 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
265 i,
266 rx_ring->next_to_use,
267 rx_ring->next_to_clean,
268 rx_ring->ring_active);
269 dev_info(&pf->pdev->dev,
270 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
271 i, rx_ring->stats.packets,
272 rx_ring->stats.bytes,
273 rx_ring->rx_stats.non_eop_descs);
274 dev_info(&pf->pdev->dev,
275 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
276 i,
277 rx_ring->rx_stats.alloc_page_failed,
278 rx_ring->rx_stats.alloc_buff_failed);
279 dev_info(&pf->pdev->dev,
280 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
281 i,
282 rx_ring->rx_stats.page_reuse_count);
283 dev_info(&pf->pdev->dev,
284 " rx_rings[%i]: size = %i\n",
285 i, rx_ring->size);
286 dev_info(&pf->pdev->dev,
287 " rx_rings[%i]: itr_setting = %d (%s)\n",
288 i, rx_ring->itr_setting,
289 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
290 }
291 for (i = 0; i < vsi->num_queue_pairs; i++) {
292 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
293
294 if (!tx_ring)
295 continue;
296
297 dev_info(&pf->pdev->dev,
298 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
299 i, *tx_ring->state,
300 tx_ring->queue_index,
301 tx_ring->reg_idx);
302 dev_info(&pf->pdev->dev,
303 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
304 i,
305 tx_ring->next_to_use,
306 tx_ring->next_to_clean,
307 tx_ring->ring_active);
308 dev_info(&pf->pdev->dev,
309 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
310 i, tx_ring->stats.packets,
311 tx_ring->stats.bytes,
312 tx_ring->tx_stats.restart_queue);
313 dev_info(&pf->pdev->dev,
314 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
315 i,
316 tx_ring->tx_stats.tx_busy,
317 tx_ring->tx_stats.tx_done_old,
318 tx_ring->tx_stats.tx_stopped);
319 dev_info(&pf->pdev->dev,
320 " tx_rings[%i]: size = %i\n",
321 i, tx_ring->size);
322 dev_info(&pf->pdev->dev,
323 " tx_rings[%i]: DCB tc = %d\n",
324 i, tx_ring->dcb_tc);
325 dev_info(&pf->pdev->dev,
326 " tx_rings[%i]: itr_setting = %d (%s)\n",
327 i, tx_ring->itr_setting,
328 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
329 }
330 if (i40e_enabled_xdp_vsi(vsi)) {
331 for (i = 0; i < vsi->num_queue_pairs; i++) {
332 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
333
334 if (!xdp_ring)
335 continue;
336
337 dev_info(&pf->pdev->dev,
338 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
339 i, *xdp_ring->state,
340 xdp_ring->queue_index,
341 xdp_ring->reg_idx);
342 dev_info(&pf->pdev->dev,
343 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
344 i,
345 xdp_ring->next_to_use,
346 xdp_ring->next_to_clean,
347 xdp_ring->ring_active);
348 dev_info(&pf->pdev->dev,
349 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
350 i, xdp_ring->stats.packets,
351 xdp_ring->stats.bytes,
352 xdp_ring->tx_stats.restart_queue);
353 dev_info(&pf->pdev->dev,
354 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
355 i,
356 xdp_ring->tx_stats.tx_busy,
357 xdp_ring->tx_stats.tx_done_old);
358 dev_info(&pf->pdev->dev,
359 " xdp_rings[%i]: size = %i\n",
360 i, xdp_ring->size);
361 dev_info(&pf->pdev->dev,
362 " xdp_rings[%i]: DCB tc = %d\n",
363 i, xdp_ring->dcb_tc);
364 dev_info(&pf->pdev->dev,
365 " xdp_rings[%i]: itr_setting = %d (%s)\n",
366 i, xdp_ring->itr_setting,
367 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
368 "dynamic" : "fixed");
369 }
370 }
371 rcu_read_unlock();
372 dev_info(&pf->pdev->dev,
373 " work_limit = %d\n",
374 vsi->work_limit);
375 dev_info(&pf->pdev->dev,
376 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
377 vsi->max_frame, vsi->rx_buf_len, 0);
378 dev_info(&pf->pdev->dev,
379 " num_q_vectors = %i, base_vector = %i\n",
380 vsi->num_q_vectors, vsi->base_vector);
381 dev_info(&pf->pdev->dev,
382 " seid = %d, id = %d, uplink_seid = %d\n",
383 vsi->seid, vsi->id, vsi->uplink_seid);
384 dev_info(&pf->pdev->dev,
385 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
386 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
387 vsi->num_rx_desc);
388 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
389 if (vsi->type == I40E_VSI_SRIOV)
390 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
391 dev_info(&pf->pdev->dev,
392 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
393 vsi->info.valid_sections, vsi->info.switch_id);
394 dev_info(&pf->pdev->dev,
395 " info: sw_reserved[] = 0x%02x 0x%02x\n",
396 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
397 dev_info(&pf->pdev->dev,
398 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
399 vsi->info.sec_flags, vsi->info.sec_reserved);
400 dev_info(&pf->pdev->dev,
401 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
402 vsi->info.pvid, vsi->info.fcoe_pvid,
403 vsi->info.port_vlan_flags);
404 dev_info(&pf->pdev->dev,
405 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
406 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
407 vsi->info.pvlan_reserved[2]);
408 dev_info(&pf->pdev->dev,
409 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
410 vsi->info.ingress_table, vsi->info.egress_table);
411 dev_info(&pf->pdev->dev,
412 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
413 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
414 vsi->info.cas_pv_reserved);
415 dev_info(&pf->pdev->dev,
416 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
417 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
418 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
419 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
420 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
421 dev_info(&pf->pdev->dev,
422 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
423 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
424 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
425 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
426 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
427 dev_info(&pf->pdev->dev,
428 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
429 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
430 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
431 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
432 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
433 dev_info(&pf->pdev->dev,
434 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
435 vsi->info.queueing_opt_flags,
436 vsi->info.queueing_opt_reserved[0],
437 vsi->info.queueing_opt_reserved[1],
438 vsi->info.queueing_opt_reserved[2]);
439 dev_info(&pf->pdev->dev,
440 " info: up_enable_bits = 0x%02x\n",
441 vsi->info.up_enable_bits);
442 dev_info(&pf->pdev->dev,
443 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
444 vsi->info.sched_reserved, vsi->info.outer_up_table);
445 dev_info(&pf->pdev->dev,
446 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
447 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
448 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
449 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
450 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
451 dev_info(&pf->pdev->dev,
452 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
453 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
454 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
455 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
456 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
457 dev_info(&pf->pdev->dev,
458 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
459 vsi->info.stat_counter_idx, vsi->info.sched_id);
460 dev_info(&pf->pdev->dev,
461 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
462 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
463 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
464 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
465 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
466 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
467 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
468 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
469 dev_info(&pf->pdev->dev,
470 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
471 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
472 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
473 dev_info(&pf->pdev->dev,
474 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
475 i, vsi->tc_config.tc_info[i].qoffset,
476 vsi->tc_config.tc_info[i].qcount,
477 vsi->tc_config.tc_info[i].netdev_tc);
478 }
479 dev_info(&pf->pdev->dev,
480 " bw: bw_limit = %d, bw_max_quanta = %d\n",
481 vsi->bw_limit, vsi->bw_max_quanta);
482 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
483 dev_info(&pf->pdev->dev,
484 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
485 i, vsi->bw_ets_share_credits[i],
486 vsi->bw_ets_limit_credits[i],
487 vsi->bw_ets_max_quanta[i]);
488 }
489 }
490
491 /**
492 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
493 * @pf: the i40e_pf created in command write
494 **/
495 static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
496 {
497 struct i40e_adminq_ring *ring;
498 struct i40e_hw *hw = &pf->hw;
499 char hdr[32];
500 int i;
501
502 snprintf(hdr, sizeof(hdr), "%s %s: ",
503 dev_driver_string(&pf->pdev->dev),
504 dev_name(&pf->pdev->dev));
505
506 /* first the send (command) ring, then the receive (event) ring */
507 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
508 ring = &(hw->aq.asq);
509 for (i = 0; i < ring->count; i++) {
510 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
511
512 dev_info(&pf->pdev->dev,
513 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
514 i, d->flags, d->opcode, d->datalen, d->retval,
515 d->cookie_high, d->cookie_low);
516 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
517 16, 1, d->params.raw, 16, 0);
518 }
519
520 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
521 ring = &(hw->aq.arq);
522 for (i = 0; i < ring->count; i++) {
523 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
524
525 dev_info(&pf->pdev->dev,
526 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
527 i, d->flags, d->opcode, d->datalen, d->retval,
528 d->cookie_high, d->cookie_low);
529 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
530 16, 1, d->params.raw, 16, 0);
531 }
532 }
533
534 /**
535 * i40e_dbg_dump_desc - handles dump desc write into command datum
536 * @cnt: number of arguments that the user supplied
537 * @vsi_seid: vsi id entered by user
538 * @ring_id: ring id entered by user
539 * @desc_n: descriptor number entered by user
540 * @pf: the i40e_pf created in command write
541 * @type: enum describing whether ring is RX, TX or XDP
542 **/
543 static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
544 struct i40e_pf *pf, enum ring_type type)
545 {
546 bool is_rx_ring = type == RING_TYPE_RX;
547 struct i40e_tx_desc *txd;
548 union i40e_rx_desc *rxd;
549 struct i40e_ring *ring;
550 struct i40e_vsi *vsi;
551 int i;
552
553 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
554 if (!vsi) {
555 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
556 return;
557 }
558 if (vsi->type != I40E_VSI_MAIN &&
559 vsi->type != I40E_VSI_FDIR &&
560 vsi->type != I40E_VSI_VMDQ2) {
561 dev_info(&pf->pdev->dev,
562 "vsi %d type %d descriptor rings not available\n",
563 vsi_seid, vsi->type);
564 return;
565 }
566 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
567 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
568 return;
569 }
570 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
571 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
572 return;
573 }
574 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
575 dev_info(&pf->pdev->dev,
576 "descriptor rings have not been allocated for vsi %d\n",
577 vsi_seid);
578 return;
579 }
580
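/* Work on a private copy of the ring so the dump below is not torn by
 * concurrent ring updates; only this snapshot is read afterwards.
 */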
581 switch (type) {
582 case RING_TYPE_RX:
583 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
584 break;
585 case RING_TYPE_TX:
586 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
587 break;
588 case RING_TYPE_XDP:
589 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
590 break;
591 default:
592 ring = NULL;
593 break;
594 }
595 if (!ring)
596 return;
597
598 if (cnt == 2) {
599 switch (type) {
600 case RING_TYPE_RX:
601 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
602 break;
603 case RING_TYPE_TX:
604 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
605 break;
606 case RING_TYPE_XDP:
607 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
608 break;
609 }
610 for (i = 0; i < ring->count; i++) {
611 if (!is_rx_ring) {
612 txd = I40E_TX_DESC(ring, i);
613 dev_info(&pf->pdev->dev,
614 " d[%03x] = 0x%016llx 0x%016llx\n",
615 i, txd->buffer_addr,
616 txd->cmd_type_offset_bsz);
617 } else {
618 rxd = I40E_RX_DESC(ring, i);
619 dev_info(&pf->pdev->dev,
620 " d[%03x] = 0x%016llx 0x%016llx\n",
621 i, rxd->read.pkt_addr,
622 rxd->read.hdr_addr);
623 }
624 }
625 } else if (cnt == 3) {
626 if (desc_n >= ring->count || desc_n < 0) {
627 dev_info(&pf->pdev->dev,
628 "descriptor %d not found\n", desc_n);
629 goto out;
630 }
631 if (!is_rx_ring) {
632 txd = I40E_TX_DESC(ring, desc_n);
633 dev_info(&pf->pdev->dev,
634 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
635 vsi_seid, ring_id, desc_n,
636 txd->buffer_addr, txd->cmd_type_offset_bsz);
637 } else {
638 rxd = I40E_RX_DESC(ring, desc_n);
639 dev_info(&pf->pdev->dev,
640 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
641 vsi_seid, ring_id, desc_n,
642 rxd->read.pkt_addr, rxd->read.hdr_addr);
643 }
644 } else {
645 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
646 }
647
648 out:
649 kfree(ring);
650 }
651
652 /**
653 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
654 * @pf: the i40e_pf created in command write
655 **/
656 static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
657 {
658 int i;
659
660 for (i = 0; i < pf->num_alloc_vsi; i++)
661 if (pf->vsi[i])
662 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
663 i, pf->vsi[i]->seid);
664 }
665
666 /**
667 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
668 * @pf: the i40e_pf created in command write
669 * @estats: the eth stats structure to be dumped
670 **/
671 static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
672 struct i40e_eth_stats *estats)
673 {
674 dev_info(&pf->pdev->dev, " ethstats:\n");
675 dev_info(&pf->pdev->dev,
676 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
677 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
678 dev_info(&pf->pdev->dev,
679 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
680 estats->rx_broadcast, estats->rx_discards);
681 dev_info(&pf->pdev->dev,
682 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
683 estats->rx_unknown_protocol, estats->tx_bytes);
684 dev_info(&pf->pdev->dev,
685 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
686 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
687 dev_info(&pf->pdev->dev,
688 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
689 estats->tx_discards, estats->tx_errors);
690 }
691
692 /**
693 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
694 * @pf: the i40e_pf created in command write
695 * @seid: the seid the user put in
696 **/
697 static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
698 {
699 struct i40e_veb *veb;
700
701 veb = i40e_dbg_find_veb(pf, seid);
702 if (!veb) {
703 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
704 return;
705 }
706 dev_info(&pf->pdev->dev,
707 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
708 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
709 veb->uplink_seid,
710 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
711 i40e_dbg_dump_eth_stats(pf, &veb->stats);
712 }
713
714 /**
715 * i40e_dbg_dump_veb_all - dumps all known veb's stats
716 * @pf: the i40e_pf created in command write
717 **/
718 static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
719 {
720 struct i40e_veb *veb;
721 int i;
722
723 for (i = 0; i < I40E_MAX_VEB; i++) {
724 veb = pf->veb[i];
725 if (veb)
726 i40e_dbg_dump_veb_seid(pf, veb->seid);
727 }
728 }
729
730 /**
731 * i40e_dbg_dump_vf - dump VF info
732 * @pf: the i40e_pf created in command write
733 * @vf_id: the vf_id from the user
734 **/
735 static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
736 {
737 struct i40e_vf *vf;
738 struct i40e_vsi *vsi;
739
740 if (!pf->num_alloc_vfs) {
741 dev_info(&pf->pdev->dev, "no VFs allocated\n");
742 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
743 vf = &pf->vf[vf_id];
744 vsi = pf->vsi[vf->lan_vsi_idx];
745 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
746 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
747 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
748 vf->num_mdd_events);
749 } else {
750 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
751 }
752 }
753
754 /**
755 * i40e_dbg_dump_vf_all - dump VF info for all VFs
756 * @pf: the i40e_pf created in command write
757 **/
758 static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
759 {
760 int i;
761
762 if (!pf->num_alloc_vfs)
763 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
764 else
765 for (i = 0; i < pf->num_alloc_vfs; i++)
766 i40e_dbg_dump_vf(pf, i);
767 }
768
769 /**
770 * i40e_dbg_command_write - write into command datum
771 * @filp: the opened file
772 * @buffer: where to find the user's data
773 * @count: the length of the user's data
774 * @ppos: file position offset
775 **/
776 static ssize_t i40e_dbg_command_write(struct file *filp,
777 const char __user *buffer,
778 size_t count, loff_t *ppos)
779 {
780 struct i40e_pf *pf = filp->private_data;
781 char *cmd_buf, *cmd_buf_tmp;
782 int bytes_not_copied;
783 struct i40e_vsi *vsi;
784 int vsi_seid;
785 int veb_seid;
786 int vf_id;
787 int cnt;
788
789 /* don't allow partial writes */
790 if (*ppos != 0)
791 return 0;
792
793 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
794 if (!cmd_buf)
795 return count;
796 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
797 if (bytes_not_copied) {
798 kfree(cmd_buf);
799 return -EFAULT;
800 }
801 cmd_buf[count] = '\0';
802
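/* cut the command string at the first newline, if any, and report only
 * the consumed length back to the writer
 */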
803 cmd_buf_tmp = strchr(cmd_buf, '\n');
804 if (cmd_buf_tmp) {
805 *cmd_buf_tmp = '\0';
806 count = cmd_buf_tmp - cmd_buf + 1;
807 }
808
809 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
810 vsi_seid = -1;
811 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
812 if (cnt == 0) {
813 /* default to PF VSI */
814 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
815 } else if (vsi_seid < 0) {
816 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
817 vsi_seid);
818 goto command_write_done;
819 }
820
821 /* By default we are in VEPA mode, if this is the first VF/VMDq
822 * VSI to be added, switch to VEB mode.
823 */
824 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
825 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
826 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
827 }
828
829 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
830 if (vsi)
831 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
832 vsi->seid, vsi->uplink_seid);
833 else
834 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
835
836 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
837 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
838 if (cnt != 1) {
839 dev_info(&pf->pdev->dev,
840 "del vsi: bad command string, cnt=%d\n",
841 cnt);
842 goto command_write_done;
843 }
844 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
845 if (!vsi) {
846 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
847 vsi_seid);
848 goto command_write_done;
849 }
850
851 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
852 i40e_vsi_release(vsi);
853
854 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
855 struct i40e_veb *veb;
856 int uplink_seid, i;
857
858 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
859 if (cnt != 2) {
860 dev_info(&pf->pdev->dev,
861 "add relay: bad command string, cnt=%d\n",
862 cnt);
863 goto command_write_done;
864 } else if (uplink_seid < 0) {
865 dev_info(&pf->pdev->dev,
866 "add relay %d: bad uplink seid\n",
867 uplink_seid);
868 goto command_write_done;
869 }
870
871 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
872 if (!vsi) {
873 dev_info(&pf->pdev->dev,
874 "add relay: VSI %d not found\n", vsi_seid);
875 goto command_write_done;
876 }
877
878 for (i = 0; i < I40E_MAX_VEB; i++)
879 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
880 break;
881 if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
882 uplink_seid != pf->mac_seid) {
883 dev_info(&pf->pdev->dev,
884 "add relay: relay uplink %d not found\n",
885 uplink_seid);
886 goto command_write_done;
887 }
888
889 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
890 vsi->tc_config.enabled_tc);
891 if (veb)
892 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
893 else
894 dev_info(&pf->pdev->dev, "add relay failed\n");
895
896 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
897 int i;
898 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
899 if (cnt != 1) {
900 dev_info(&pf->pdev->dev,
901 "del relay: bad command string, cnt=%d\n",
902 cnt);
903 goto command_write_done;
904 } else if (veb_seid < 0) {
905 dev_info(&pf->pdev->dev,
906 "del relay %d: bad relay seid\n", veb_seid);
907 goto command_write_done;
908 }
909
910 /* find the veb */
911 for (i = 0; i < I40E_MAX_VEB; i++)
912 if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
913 break;
914 if (i >= I40E_MAX_VEB) {
915 dev_info(&pf->pdev->dev,
916 "del relay: relay %d not found\n", veb_seid);
917 goto command_write_done;
918 }
919
920 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
921 i40e_veb_release(pf->veb[i]);
922 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
923 unsigned int v;
924 int ret;
925 u16 vid;
926
927 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
928 if (cnt != 2) {
929 dev_info(&pf->pdev->dev,
930 "add pvid: bad command string, cnt=%d\n", cnt);
931 goto command_write_done;
932 }
933
934 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
935 if (!vsi) {
936 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
937 vsi_seid);
938 goto command_write_done;
939 }
940
941 vid = v;
942 ret = i40e_vsi_add_pvid(vsi, vid);
943 if (!ret)
944 dev_info(&pf->pdev->dev,
945 "add pvid: %d added to VSI %d\n",
946 vid, vsi_seid);
947 else
948 dev_info(&pf->pdev->dev,
949 "add pvid: %d to VSI %d failed, ret=%d\n",
950 vid, vsi_seid, ret);
951
952 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
953
954 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
955 if (cnt != 1) {
956 dev_info(&pf->pdev->dev,
957 "del pvid: bad command string, cnt=%d\n",
958 cnt);
959 goto command_write_done;
960 }
961
962 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
963 if (!vsi) {
964 dev_info(&pf->pdev->dev,
965 "del pvid: VSI %d not found\n", vsi_seid);
966 goto command_write_done;
967 }
968
969 i40e_vsi_remove_pvid(vsi);
970 dev_info(&pf->pdev->dev,
971 "del pvid: removed from VSI %d\n", vsi_seid);
972
973 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
974 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
975 i40e_fetch_switch_configuration(pf, true);
976 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
977 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
978 if (cnt > 0)
979 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
980 else
981 i40e_dbg_dump_vsi_no_seid(pf);
982 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
983 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
984 if (cnt > 0)
985 i40e_dbg_dump_veb_seid(pf, vsi_seid);
986 else
987 i40e_dbg_dump_veb_all(pf);
988 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
989 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
990 if (cnt > 0)
991 i40e_dbg_dump_vf(pf, vf_id);
992 else
993 i40e_dbg_dump_vf_all(pf);
994 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
995 int ring_id, desc_n;
996 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
997 cnt = sscanf(&cmd_buf[12], "%i %i %i",
998 &vsi_seid, &ring_id, &desc_n);
999 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1000 desc_n, pf, RING_TYPE_RX);
1001 } else if (strncmp(&cmd_buf[10], "tx", 2)
1002 == 0) {
1003 cnt = sscanf(&cmd_buf[12], "%i %i %i",
1004 &vsi_seid, &ring_id, &desc_n);
1005 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1006 desc_n, pf, RING_TYPE_TX);
1007 } else if (strncmp(&cmd_buf[10], "xdp", 3)
1008 == 0) {
1009 cnt = sscanf(&cmd_buf[13], "%i %i %i",
1010 &vsi_seid, &ring_id, &desc_n);
1011 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1012 desc_n, pf, RING_TYPE_XDP);
1013 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
1014 i40e_dbg_dump_aq_desc(pf);
1015 } else {
1016 dev_info(&pf->pdev->dev,
1017 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1018 dev_info(&pf->pdev->dev,
1019 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1020 dev_info(&pf->pdev->dev,
1021 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1022 dev_info(&pf->pdev->dev, "dump desc aq\n");
1023 }
1024 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1025 dev_info(&pf->pdev->dev,
1026 "core reset count: %d\n", pf->corer_count);
1027 dev_info(&pf->pdev->dev,
1028 "global reset count: %d\n", pf->globr_count);
1029 dev_info(&pf->pdev->dev,
1030 "emp reset count: %d\n", pf->empr_count);
1031 dev_info(&pf->pdev->dev,
1032 "pf reset count: %d\n", pf->pfr_count);
1033 dev_info(&pf->pdev->dev,
1034 "pf tx sluggish count: %d\n",
1035 pf->tx_sluggish_count);
1036 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1037 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1038 struct i40e_dcbx_config *cfg =
1039 &pf->hw.local_dcbx_config;
1040 struct i40e_dcbx_config *r_cfg =
1041 &pf->hw.remote_dcbx_config;
1042 int i, ret;
1043 u16 switch_id;
1044
1045 bw_data = kzalloc(sizeof(
1046 struct i40e_aqc_query_port_ets_config_resp),
1047 GFP_KERNEL);
1048 if (!bw_data) {
1049 ret = -ENOMEM;
1050 goto command_write_done;
1051 }
1052
1053 vsi = pf->vsi[pf->lan_vsi];
1054 switch_id =
1055 le16_to_cpu(vsi->info.switch_id) &
1056 I40E_AQ_VSI_SW_ID_MASK;
1057
1058 ret = i40e_aq_query_port_ets_config(&pf->hw,
1059 switch_id,
1060 bw_data, NULL);
1061 if (ret) {
1062 dev_info(&pf->pdev->dev,
1063 "Query Port ETS Config AQ command failed =0x%x\n",
1064 pf->hw.aq.asq_last_status);
1065 kfree(bw_data);
1066 bw_data = NULL;
1067 goto command_write_done;
1068 }
1069 dev_info(&pf->pdev->dev,
1070 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1071 bw_data->tc_valid_bits,
1072 bw_data->tc_strict_priority_bits,
1073 le16_to_cpu(bw_data->tc_bw_max[0]),
1074 le16_to_cpu(bw_data->tc_bw_max[1]));
1075 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1076 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1077 bw_data->tc_bw_share_credits[i],
1078 le16_to_cpu(bw_data->tc_bw_limits[i]));
1079 }
1080
1081 kfree(bw_data);
1082 bw_data = NULL;
1083
1084 dev_info(&pf->pdev->dev,
1085 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1086 dev_info(&pf->pdev->dev,
1087 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1088 cfg->etscfg.willing, cfg->etscfg.cbs,
1089 cfg->etscfg.maxtcs);
1090 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1091 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1092 i, cfg->etscfg.prioritytable[i],
1093 cfg->etscfg.tcbwtable[i],
1094 cfg->etscfg.tsatable[i]);
1095 }
1096 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1097 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1098 i, cfg->etsrec.prioritytable[i],
1099 cfg->etsrec.tcbwtable[i],
1100 cfg->etsrec.tsatable[i]);
1101 }
1102 dev_info(&pf->pdev->dev,
1103 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1104 cfg->pfc.willing, cfg->pfc.mbc,
1105 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1106 dev_info(&pf->pdev->dev,
1107 "port app_table: num_apps=%d\n", cfg->numapps);
1108 for (i = 0; i < cfg->numapps; i++) {
1109 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1110 i, cfg->app[i].priority,
1111 cfg->app[i].selector,
1112 cfg->app[i].protocolid);
1113 }
1114 /* Peer TLV DCBX data */
1115 dev_info(&pf->pdev->dev,
1116 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1117 r_cfg->etscfg.willing,
1118 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1119 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1120 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1121 i, r_cfg->etscfg.prioritytable[i],
1122 r_cfg->etscfg.tcbwtable[i],
1123 r_cfg->etscfg.tsatable[i]);
1124 }
1125 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1126 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1127 i, r_cfg->etsrec.prioritytable[i],
1128 r_cfg->etsrec.tcbwtable[i],
1129 r_cfg->etsrec.tsatable[i]);
1130 }
1131 dev_info(&pf->pdev->dev,
1132 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1133 r_cfg->pfc.willing,
1134 r_cfg->pfc.mbc,
1135 r_cfg->pfc.pfccap,
1136 r_cfg->pfc.pfcenable);
1137 dev_info(&pf->pdev->dev,
1138 "remote port app_table: num_apps=%d\n",
1139 r_cfg->numapps);
1140 for (i = 0; i < r_cfg->numapps; i++) {
1141 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1142 i, r_cfg->app[i].priority,
1143 r_cfg->app[i].selector,
1144 r_cfg->app[i].protocolid);
1145 }
1146 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1147 int cluster_id, table_id;
1148 int index, ret;
1149 u16 buff_len = 4096;
1150 u32 next_index;
1151 u8 next_table;
1152 u8 *buff;
1153 u16 rlen;
1154
1155 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1156 &cluster_id, &table_id, &index);
1157 if (cnt != 3) {
1158 dev_info(&pf->pdev->dev,
1159 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1160 goto command_write_done;
1161 }
1162
1163 dev_info(&pf->pdev->dev,
1164 "AQ debug dump fwdata params %x %x %x %x\n",
1165 cluster_id, table_id, index, buff_len);
1166 buff = kzalloc(buff_len, GFP_KERNEL);
1167 if (!buff)
1168 goto command_write_done;
1169
1170 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1171 index, buff_len, buff, &rlen,
1172 &next_table, &next_index,
1173 NULL);
1174 if (ret) {
1175 dev_info(&pf->pdev->dev,
1176 "debug dump fwdata AQ Failed %d 0x%x\n",
1177 ret, pf->hw.aq.asq_last_status);
1178 kfree(buff);
1179 buff = NULL;
1180 goto command_write_done;
1181 }
1182 dev_info(&pf->pdev->dev,
1183 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1184 rlen, next_table, next_index);
1185 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1186 DUMP_PREFIX_OFFSET, 16, 1,
1187 buff, rlen, true);
1188 kfree(buff);
1189 buff = NULL;
1190 } else {
1191 dev_info(&pf->pdev->dev,
1192 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1193 dev_info(&pf->pdev->dev, "dump switch\n");
1194 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1195 dev_info(&pf->pdev->dev, "dump reset stats\n");
1196 dev_info(&pf->pdev->dev, "dump port\n");
1197 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1198 dev_info(&pf->pdev->dev,
1199 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1200 }
1201 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1202 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1203 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1204
1205 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1206 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1207 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1208
1209 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1210 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1211 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1212
1213 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1214 u32 address;
1215 u32 value;
1216
1217 cnt = sscanf(&cmd_buf[4], "%i", &address);
1218 if (cnt != 1) {
1219 dev_info(&pf->pdev->dev, "read <reg>\n");
1220 goto command_write_done;
1221 }
1222
1223 /* check the range on address */
1224 if (address > (pf->ioremap_len - sizeof(u32))) {
1225 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1226 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1227 goto command_write_done;
1228 }
1229
1230 value = rd32(&pf->hw, address);
1231 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1232 address, value);
1233
1234 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1235 u32 address, value;
1236
1237 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1238 if (cnt != 2) {
1239 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1240 goto command_write_done;
1241 }
1242
1243 /* check the range on address */
1244 if (address > (pf->ioremap_len - sizeof(u32))) {
1245 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1246 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1247 goto command_write_done;
1248 }
1249 wr32(&pf->hw, address, value);
1250 value = rd32(&pf->hw, address);
1251 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1252 address, value);
1253 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1254 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1255 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1256 if (cnt == 0) {
1257 int i;
1258
1259 for (i = 0; i < pf->num_alloc_vsi; i++)
1260 i40e_vsi_reset_stats(pf->vsi[i]);
1261 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1262 } else if (cnt == 1) {
1263 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1264 if (!vsi) {
1265 dev_info(&pf->pdev->dev,
1266 "clear_stats vsi: bad vsi %d\n",
1267 vsi_seid);
1268 goto command_write_done;
1269 }
1270 i40e_vsi_reset_stats(vsi);
1271 dev_info(&pf->pdev->dev,
1272 "vsi clear stats called for vsi %d\n",
1273 vsi_seid);
1274 } else {
1275 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1276 }
1277 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1278 if (pf->hw.partition_id == 1) {
1279 i40e_pf_reset_stats(pf);
1280 dev_info(&pf->pdev->dev, "port stats cleared\n");
1281 } else {
1282 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1283 }
1284 } else {
1285 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1286 }
1287 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1288 struct i40e_aq_desc *desc;
1289 int ret;
1290
1291 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1292 if (!desc)
1293 goto command_write_done;
1294 cnt = sscanf(&cmd_buf[11],
1295 "%hi %hi %hi %hi %i %i %i %i %i %i",
1296 &desc->flags,
1297 &desc->opcode, &desc->datalen, &desc->retval,
1298 &desc->cookie_high, &desc->cookie_low,
1299 &desc->params.internal.param0,
1300 &desc->params.internal.param1,
1301 &desc->params.internal.param2,
1302 &desc->params.internal.param3);
1303 if (cnt != 10) {
1304 dev_info(&pf->pdev->dev,
1305 "send aq_cmd: bad command string, cnt=%d\n",
1306 cnt);
1307 kfree(desc);
1308 desc = NULL;
1309 goto command_write_done;
1310 }
1311 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1312 if (!ret) {
1313 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1314 } else if (ret == -EIO) {
1315 dev_info(&pf->pdev->dev,
1316 "AQ command send failed Opcode %x AQ Error: %d\n",
1317 desc->opcode, pf->hw.aq.asq_last_status);
1318 } else {
1319 dev_info(&pf->pdev->dev,
1320 "AQ command send failed Opcode %x Status: %d\n",
1321 desc->opcode, ret);
1322 }
1323 dev_info(&pf->pdev->dev,
1324 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1325 desc->flags, desc->opcode, desc->datalen, desc->retval,
1326 desc->cookie_high, desc->cookie_low,
1327 desc->params.internal.param0,
1328 desc->params.internal.param1,
1329 desc->params.internal.param2,
1330 desc->params.internal.param3);
1331 kfree(desc);
1332 desc = NULL;
1333 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1334 struct i40e_aq_desc *desc;
1335 u16 buffer_len;
1336 u8 *buff;
1337 int ret;
1338
1339 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1340 if (!desc)
1341 goto command_write_done;
1342 cnt = sscanf(&cmd_buf[20],
1343 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1344 &desc->flags,
1345 &desc->opcode, &desc->datalen, &desc->retval,
1346 &desc->cookie_high, &desc->cookie_low,
1347 &desc->params.internal.param0,
1348 &desc->params.internal.param1,
1349 &desc->params.internal.param2,
1350 &desc->params.internal.param3,
1351 &buffer_len);
1352 if (cnt != 11) {
1353 dev_info(&pf->pdev->dev,
1354 "send indirect aq_cmd: bad command string, cnt=%d\n",
1355 cnt);
1356 kfree(desc);
1357 desc = NULL;
1358 goto command_write_done;
1359 }
1360 /* Just stub a buffer big enough in case user messed up */
1361 if (buffer_len == 0)
1362 buffer_len = 1280;
1363
1364 buff = kzalloc(buffer_len, GFP_KERNEL);
1365 if (!buff) {
1366 kfree(desc);
1367 desc = NULL;
1368 goto command_write_done;
1369 }
1370 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1371 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1372 buffer_len, NULL);
1373 if (!ret) {
1374 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1375 } else if (ret == -EIO) {
1376 dev_info(&pf->pdev->dev,
1377 "AQ command send failed Opcode %x AQ Error: %d\n",
1378 desc->opcode, pf->hw.aq.asq_last_status);
1379 } else {
1380 dev_info(&pf->pdev->dev,
1381 "AQ command send failed Opcode %x Status: %d\n",
1382 desc->opcode, ret);
1383 }
1384 dev_info(&pf->pdev->dev,
1385 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1386 desc->flags, desc->opcode, desc->datalen, desc->retval,
1387 desc->cookie_high, desc->cookie_low,
1388 desc->params.internal.param0,
1389 desc->params.internal.param1,
1390 desc->params.internal.param2,
1391 desc->params.internal.param3);
1392 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1393 DUMP_PREFIX_OFFSET, 16, 1,
1394 buff, buffer_len, true);
1395 kfree(buff);
1396 buff = NULL;
1397 kfree(desc);
1398 desc = NULL;
1399 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1400 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1401 i40e_get_current_fd_count(pf));
1402 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1403 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1404 int ret;
1405
1406 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1407 if (ret) {
1408 dev_info(&pf->pdev->dev,
1409 "Stop LLDP AQ command failed =0x%x\n",
1410 pf->hw.aq.asq_last_status);
1411 goto command_write_done;
1412 }
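/* With the firmware LLDP agent stopped, add a control packet filter so
 * LLDP frames are delivered to the host for host-managed DCBX.
 */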
1413 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1414 pf->hw.mac.addr,
1415 ETH_P_LLDP, 0,
1416 pf->vsi[pf->lan_vsi]->seid,
1417 0, true, NULL, NULL);
1418 if (ret) {
1419 dev_info(&pf->pdev->dev,
1420 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1421 __func__, pf->hw.aq.asq_last_status);
1422 goto command_write_done;
1423 }
1424 #ifdef CONFIG_I40E_DCB
1425 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1426 DCB_CAP_DCBX_VER_IEEE;
1427 #endif /* CONFIG_I40E_DCB */
1428 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1429 int ret;
1430
1431 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1432 pf->hw.mac.addr,
1433 ETH_P_LLDP, 0,
1434 pf->vsi[pf->lan_vsi]->seid,
1435 0, false, NULL, NULL);
1436 if (ret) {
1437 dev_info(&pf->pdev->dev,
1438 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1439 __func__, pf->hw.aq.asq_last_status);
1440 /* Continue and start FW LLDP anyways */
1441 }
1442
1443 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1444 if (ret) {
1445 dev_info(&pf->pdev->dev,
1446 "Start LLDP AQ command failed =0x%x\n",
1447 pf->hw.aq.asq_last_status);
1448 goto command_write_done;
1449 }
1450 #ifdef CONFIG_I40E_DCB
1451 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1452 DCB_CAP_DCBX_VER_IEEE;
1453 #endif /* CONFIG_I40E_DCB */
1454 } else if (strncmp(&cmd_buf[5],
1455 "get local", 9) == 0) {
1456 u16 llen, rlen;
1457 int ret;
1458 u8 *buff;
1459
1460 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1461 if (!buff)
1462 goto command_write_done;
1463
1464 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1465 I40E_AQ_LLDP_MIB_LOCAL,
1466 buff, I40E_LLDPDU_SIZE,
1467 &llen, &rlen, NULL);
1468 if (ret) {
1469 dev_info(&pf->pdev->dev,
1470 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1471 pf->hw.aq.asq_last_status);
1472 kfree(buff);
1473 buff = NULL;
1474 goto command_write_done;
1475 }
1476 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1477 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1478 DUMP_PREFIX_OFFSET, 16, 1,
1479 buff, I40E_LLDPDU_SIZE, true);
1480 kfree(buff);
1481 buff = NULL;
1482 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1483 u16 llen, rlen;
1484 int ret;
1485 u8 *buff;
1486
1487 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1488 if (!buff)
1489 goto command_write_done;
1490
1491 ret = i40e_aq_get_lldp_mib(&pf->hw,
1492 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1493 I40E_AQ_LLDP_MIB_REMOTE,
1494 buff, I40E_LLDPDU_SIZE,
1495 &llen, &rlen, NULL);
1496 if (ret) {
1497 dev_info(&pf->pdev->dev,
1498 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1499 pf->hw.aq.asq_last_status);
1500 kfree(buff);
1501 buff = NULL;
1502 goto command_write_done;
1503 }
1504 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1505 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1506 DUMP_PREFIX_OFFSET, 16, 1,
1507 buff, I40E_LLDPDU_SIZE, true);
1508 kfree(buff);
1509 buff = NULL;
1510 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1511 int ret;
1512
1513 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1514 true, NULL);
1515 if (ret) {
1516 dev_info(&pf->pdev->dev,
1517 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1518 pf->hw.aq.asq_last_status);
1519 goto command_write_done;
1520 }
1521 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1522 int ret;
1523
1524 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1525 false, NULL);
1526 if (ret) {
1527 dev_info(&pf->pdev->dev,
1528 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1529 pf->hw.aq.asq_last_status);
1530 goto command_write_done;
1531 }
1532 }
1533 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1534 u16 buffer_len, bytes;
1535 u16 module;
1536 u32 offset;
1537 u16 *buff;
1538 int ret;
1539
1540 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1541 &module, &offset, &buffer_len);
1542 if (cnt == 0) {
1543 module = 0;
1544 offset = 0;
1545 buffer_len = 0;
1546 } else if (cnt == 1) {
1547 offset = 0;
1548 buffer_len = 0;
1549 } else if (cnt == 2) {
1550 buffer_len = 0;
1551 } else if (cnt > 3) {
1552 dev_info(&pf->pdev->dev,
1553 "nvm read: bad command string, cnt=%d\n", cnt);
1554 goto command_write_done;
1555 }
1556
1557 /* set the max length */
1558 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1559
1560 bytes = 2 * buffer_len;
1561
1562 /* read at least 1k bytes, no more than 4kB */
1563 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1564 buff = kzalloc(bytes, GFP_KERNEL);
1565 if (!buff)
1566 goto command_write_done;
1567
1568 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1569 if (ret) {
1570 dev_info(&pf->pdev->dev,
1571 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1572 ret, pf->hw.aq.asq_last_status);
1573 kfree(buff);
1574 goto command_write_done;
1575 }
1576
1577 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1578 bytes, (u8 *)buff, true, NULL);
1579 i40e_release_nvm(&pf->hw);
1580 if (ret) {
1581 dev_info(&pf->pdev->dev,
1582 "Read NVM AQ failed err=%d status=0x%x\n",
1583 ret, pf->hw.aq.asq_last_status);
1584 } else {
1585 dev_info(&pf->pdev->dev,
1586 "Read NVM module=0x%x offset=0x%x words=%d\n",
1587 module, offset, buffer_len);
1588 if (bytes)
1589 print_hex_dump(KERN_INFO, "NVM Dump: ",
1590 DUMP_PREFIX_OFFSET, 16, 2,
1591 buff, bytes, true);
1592 }
1593 kfree(buff);
1594 buff = NULL;
1595 } else {
1596 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1597 dev_info(&pf->pdev->dev, "available commands\n");
1598 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1599 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1600 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1601 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1602 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1603 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1604 dev_info(&pf->pdev->dev, " dump switch\n");
1605 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1606 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1607 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1608 dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1609 dev_info(&pf->pdev->dev, " dump desc aq\n");
1610 dev_info(&pf->pdev->dev, " dump reset stats\n");
1611 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
1612 dev_info(&pf->pdev->dev, " read <reg>\n");
1613 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1614 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1615 dev_info(&pf->pdev->dev, " clear_stats port\n");
1616 dev_info(&pf->pdev->dev, " pfr\n");
1617 dev_info(&pf->pdev->dev, " corer\n");
1618 dev_info(&pf->pdev->dev, " globr\n");
1619 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
1620 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
1621 dev_info(&pf->pdev->dev, " fd current cnt");
1622 dev_info(&pf->pdev->dev, " lldp start\n");
1623 dev_info(&pf->pdev->dev, " lldp stop\n");
1624 dev_info(&pf->pdev->dev, " lldp get local\n");
1625 dev_info(&pf->pdev->dev, " lldp get remote\n");
1626 dev_info(&pf->pdev->dev, " lldp event on\n");
1627 dev_info(&pf->pdev->dev, " lldp event off\n");
1628 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
1629 }
1630
1631 command_write_done:
1632 kfree(cmd_buf);
1633 cmd_buf = NULL;
1634 return count;
1635 }
1636
1637 static const struct file_operations i40e_dbg_command_fops = {
1638 .owner = THIS_MODULE,
1639 .open = simple_open,
1640 .read = i40e_dbg_command_read,
1641 .write = i40e_dbg_command_write,
1642 };
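
/* Illustrative usage of the "command" entry: the path assumes debugfs is
 * mounted at /sys/kernel/debug and uses a hypothetical PCI address, so
 * adjust both for the local system.
 *
 *   echo "dump switch" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *
 * Command output is emitted through dev_info(), so read the results from
 * the kernel log (dmesg) rather than from the debugfs file itself.
 */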
1643
1644 /**************************************************************
1645 * netdev_ops
1646 * The netdev_ops entry in debugfs accepts commands that exercise the
1647 * driver's netdev operations (ndo callbacks) directly.
1648 **************************************************************/
1649 static char i40e_dbg_netdev_ops_buf[256] = "";
1650
1651 /**
1652 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
1653 * @filp: the opened file
1654 * @buffer: where to write the data for the user to read
1655 * @count: the size of the user's buffer
1656 * @ppos: file position offset
1657 **/
1658 static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
1659 size_t count, loff_t *ppos)
1660 {
1661 struct i40e_pf *pf = filp->private_data;
1662 int bytes_not_copied;
1663 int buf_size = 256;
1664 char *buf;
1665 int len;
1666
1667 /* don't allow partial reads */
1668 if (*ppos != 0)
1669 return 0;
1670 if (count < buf_size)
1671 return -ENOSPC;
1672
1673 buf = kzalloc(buf_size, GFP_KERNEL);
1674 if (!buf)
1675 return -ENOSPC;
1676
1677 len = snprintf(buf, buf_size, "%s: %s\n",
1678 pf->vsi[pf->lan_vsi]->netdev->name,
1679 i40e_dbg_netdev_ops_buf);
1680
1681 bytes_not_copied = copy_to_user(buffer, buf, len);
1682 kfree(buf);
1683
1684 if (bytes_not_copied)
1685 return -EFAULT;
1686
1687 *ppos = len;
1688 return len;
1689 }
1690
1691 /**
1692 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
1693 * @filp: the opened file
1694 * @buffer: where to find the user's data
1695 * @count: the length of the user's data
1696 * @ppos: file position offset
1697 **/
1698 static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1699 const char __user *buffer,
1700 size_t count, loff_t *ppos)
1701 {
1702 struct i40e_pf *pf = filp->private_data;
1703 int bytes_not_copied;
1704 struct i40e_vsi *vsi;
1705 char *buf_tmp;
1706 int vsi_seid;
1707 int i, cnt;
1708
1709 /* don't allow partial writes */
1710 if (*ppos != 0)
1711 return 0;
1712 if (count >= sizeof(i40e_dbg_netdev_ops_buf))
1713 return -ENOSPC;
1714
1715 memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
1716 bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
1717 buffer, count);
1718 if (bytes_not_copied)
1719 return -EFAULT;
1720 i40e_dbg_netdev_ops_buf[count] = '\0';
1721
1722 buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
1723 if (buf_tmp) {
1724 *buf_tmp = '\0';
1725 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
1726 }
1727
1728 if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
1729 int mtu;
1730
1731 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
1732 &vsi_seid, &mtu);
1733 if (cnt != 2) {
1734 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
1735 goto netdev_ops_write_done;
1736 }
1737 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1738 if (!vsi) {
1739 dev_info(&pf->pdev->dev,
1740 "change_mtu: VSI %d not found\n", vsi_seid);
1741 } else if (!vsi->netdev) {
1742 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
1743 vsi_seid);
1744 } else if (rtnl_trylock()) {
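			/* ndo_change_mtu() is normally called by the core
			 * with RTNL held; trylock is used here so a debugfs
			 * write never blocks waiting for the lock.
			 */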
1745 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
1746 mtu);
1747 rtnl_unlock();
1748 dev_info(&pf->pdev->dev, "change_mtu called\n");
1749 } else {
1750 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1751 }
1752
1753 } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
1754 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1755 if (cnt != 1) {
1756 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
1757 goto netdev_ops_write_done;
1758 }
1759 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1760 if (!vsi) {
1761 dev_info(&pf->pdev->dev,
1762 "set_rx_mode: VSI %d not found\n", vsi_seid);
1763 } else if (!vsi->netdev) {
1764 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
1765 vsi_seid);
1766 } else if (rtnl_trylock()) {
1767 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
1768 rtnl_unlock();
1769 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
1770 } else {
1771 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1772 }
1773
1774 } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
1775 cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
1776 if (cnt != 1) {
1777 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
1778 goto netdev_ops_write_done;
1779 }
1780 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1781 if (!vsi) {
1782 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
1783 vsi_seid);
1784 } else if (!vsi->netdev) {
1785 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
1786 vsi_seid);
1787 } else {
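			/* Kick NAPI on every queue vector of the VSI so that
			 * each one runs a poll cycle.
			 */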
1788 for (i = 0; i < vsi->num_q_vectors; i++)
1789 napi_schedule(&vsi->q_vectors[i]->napi);
1790 dev_info(&pf->pdev->dev, "napi called\n");
1791 }
1792 } else {
1793 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
1794 i40e_dbg_netdev_ops_buf);
1795 dev_info(&pf->pdev->dev, "available commands\n");
1796 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
1797 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
1798 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
1799 }
1800 netdev_ops_write_done:
1801 return count;
1802 }
1803
1804 static const struct file_operations i40e_dbg_netdev_ops_fops = {
1805 .owner = THIS_MODULE,
1806 .open = simple_open,
1807 .read = i40e_dbg_netdev_ops_read,
1808 .write = i40e_dbg_netdev_ops_write,
1809 };
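
/* Illustrative usage of the "netdev_ops" entry, with a hypothetical VSI seid
 * and PCI address and the default debugfs mount point assumed:
 *
 *   echo "napi 390" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *   echo "change_mtu 390 9000" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *
 * As with the "command" entry, results are reported via dev_info() in the
 * kernel log.
 */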
1810
1811 /**
1812 * i40e_dbg_pf_init - setup the debugfs directory for the PF
1813 * @pf: the PF that is starting up
1814 **/
1815 void i40e_dbg_pf_init(struct i40e_pf *pf)
1816 {
1817 const char *name = pci_name(pf->pdev);
1818
1819 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
1820
1821 debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
1822 &i40e_dbg_command_fops);
1823
1824 debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
1825 &i40e_dbg_netdev_ops_fops);
1826 }
1827
1828 /**
1829 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
1830 * @pf: the PF that is stopping
1831 **/
1832 void i40e_dbg_pf_exit(struct i40e_pf *pf)
1833 {
1834 debugfs_remove_recursive(pf->i40e_dbg_pf);
1835 pf->i40e_dbg_pf = NULL;
1836 }
1837
1838 /**
1839 * i40e_dbg_init - start up debugfs for the driver
1840 **/
1841 void i40e_dbg_init(void)
1842 {
1843 i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
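	/* debugfs_create_dir() reports failure with an ERR_PTR-encoded value
	 * rather than NULL, hence the IS_ERR() check below.
	 */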
1844 if (IS_ERR(i40e_dbg_root))
1845 pr_info("init of debugfs failed\n");
1846 }
1847
1848 /**
1849 * i40e_dbg_exit - clean out the driver's debugfs entries
1850 **/
1851 void i40e_dbg_exit(void)
1852 {
1853 debugfs_remove_recursive(i40e_dbg_root);
1854 i40e_dbg_root = NULL;
1855 }
1856
1857 #endif /* CONFIG_DEBUG_FS */
1858