/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;
	u32			port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

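/* An EMAD frame, as built by mlxsw_emad_construct() below, is laid out as
 * follows (sizes in bytes; a sketch derived from the item definitions
 * above):
 *
 *	Ethernet header		MLXSW_EMAD_ETH_HDR_LEN
 *	Operation TLV		MLXSW_EMAD_OP_TLV_LEN * 4
 *	Register TLV		4 + reg->len
 *	End TLV			MLXSW_EMAD_END_TLV_LEN * 4
 */
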
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}

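/* The helpers below locate the operation TLV, register TLV and register
 * payload within an EMAD skb, based on the fixed frame layout described
 * above.
 */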
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
				      MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE;
}

#define MLXSW_EMAD_TIMEOUT_MS 200

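/* Transmit a single EMAD and sleep until either the matching response
 * arrives (the RX listener clears trans_active and wakes us up) or the
 * timeout expires. Only one transaction may be in flight at a time; this
 * is guaranteed by emad.lock.
 */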
static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		return err;
	}

	mlxsw_core->emad.trans_active = true;
	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		mlxsw_core->emad.trans_active = false;
		return -EIO;
	}

	return 0;
}

static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

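/* Transmit an EMAD, retrying up to MLXSW_EMAD_MAX_RETRY times when the
 * device reports a busy or receipt-ack status (-EAGAIN). The transaction
 * ID is bumped once the transaction is over, whether it succeeded or not.
 */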
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry;
	int err;

	n_retry = 0;
retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}

static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

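/* Allocate an skb large enough for the whole EMAD and reserve all of it,
 * so that mlxsw_emad_construct() can build the frame back-to-front with
 * skb_push().
 */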
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

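/* Dump a buffer to the debug log as big-endian 32-bit words, four words
 * per line, with trailing all-zero words trimmed.
 */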
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

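/* Find a driver by kind, attempting to load its module via request_module()
 * on a miss, and take a reference on the owning module before returning.
 */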
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

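/* driver_priv is the last member of struct mlxsw_core, so the core can be
 * recovered from a driver's private data pointer with container_of().
 */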
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		/* skb was consumed by mlxsw_emad_transmit(); dump the
		 * response skb using its own length.
		 */
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}

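/* Register access via the command interface: the operation and register
 * TLVs are packed into the input mailbox exactly as they would appear
 * inside an EMAD, and the access_reg command is retried while the device
 * reports a busy status (-EAGAIN).
 */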
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);

	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

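/* A minimal usage sketch (hypothetical caller; assumes the PMLP register
 * defined in reg.h): query a register, modify the payload, write it back:
 *
 *	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *	int err;
 *
 *	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 *	if (err)
 *		return err;
 *	... modify pmlp_pl via the mlxsw_reg_pmlp_* setters ...
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 */

/* mlxsw_core_skb_receive() dispatches a packet trapped to the CPU to the
 * first RX listener matching its local port and trap ID, updating per-CPU
 * statistics on the way; unmatched or out-of-range packets are dropped.
 */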
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port = rx_info->sys_port;
	bool found = false;

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
			    __func__, rx_info->sys_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

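/* Execute a firmware command through the underlying bus. Input and output
 * mailboxes are dumped to the debug log; failure and timeout statuses are
 * reported with the opcode decoded into a string.
 */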
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");