/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync syncp;
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32 trap_rx_invalid;
	u32 port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
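/* An EMAD frame, as assembled back-to-front by mlxsw_emad_construct()
 * below, consists of (in wire order):
 *
 *	Ethernet header	(MLXSW_EMAD_ETH_HDR_LEN bytes)
 *	Operation TLV	(MLXSW_EMAD_OP_TLV_LEN u32s)
 *	Register TLV	(one u32 header followed by reg->len payload bytes)
 *	End TLV		(MLXSW_EMAD_END_TLV_LEN u32s)
 *
 * This is only a summary of the item definitions above; the offsets
 * within each part are given by the MLXSW_ITEM32/MLXSW_ITEM64 entries.
 */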
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}
static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
}

#define MLXSW_EMAD_TIMEOUT_MS 200

static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	mlxsw_core->emad.trans_active = true;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		goto trans_inactive_out;
	}

	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		err = -EIO;
		goto trans_inactive_out;
	}

	return 0;

trans_inactive_out:
	mlxsw_core->emad.trans_active = false;
	return err;
}

static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}
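/* Send an EMAD and wait for the paired response. A fresh copy of the
 * frame is transmitted on every attempt so that the original skb stays
 * available for retransmission; a busy/receipt-ack status (-EAGAIN from
 * mlxsw_emad_process_status_skb()) is retried up to MLXSW_EMAD_MAX_RETRY
 * times before the error is propagated.
 */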
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry;
	int err;

	n_retry = 0;
retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}

static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	/* Accept a response only if it carries the TID of the currently
	 * pending transaction; anything else is dropped.
	 */
	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
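/* Bring-up of the EMAD transport: seed the transaction ID, register the
 * RX listener that completes pending transactions, steer the ETHEMAD
 * trap to the CPU, and only then mark EMAD usable for register access
 * (see mlxsw_core_reg_access()).
 */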
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
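/* Note on mlxsw_emad_alloc() above: the skb is zeroed and then fully
 * reserved, so that mlxsw_emad_construct() and the driver's
 * txhdr_construct() callback can build the frame back-to-front with
 * skb_push(), from the end TLV down to the bus-specific TX header.
 */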
/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Trim trailing zero words, but always dump at least one line. */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
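/* Minimal TX usage sketch from a port driver's xmit path (the port and
 * driver_priv names below are illustrative, not part of this API):
 *
 *	const struct mlxsw_tx_info tx_info = {
 *		.local_port = port->local_port,
 *		.is_emad = false,
 *	};
 *
 *	if (mlxsw_core_skb_transmit_busy(driver_priv, &tx_info))
 *		return NETDEV_TX_BUSY;
 *	err = mlxsw_core_skb_transmit(driver_priv, skb, &tx_info);
 */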
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}
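/* Event listeners are layered on top of RX listeners: registering one
 * installs an RX listener whose handler (mlxsw_core_event_listener_func()
 * above) decodes the EMAD operation and register TLVs and hands the
 * register payload to the event callback.
 */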
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}
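/* Central dispatch for register access. The emad.lock mutex serializes
 * transactions, and the transaction ID logged below is the one the
 * chosen backend uses for its request.
 */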
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization the EMAD interface is not yet available
	 * to us, so we default to the command interface. We switch to the
	 * EMAD interface after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);

	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
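/* Usage sketch, mirroring mlxsw_emad_traps_set() above: callers pack a
 * payload buffer with the register's mlxsw_reg_*_pack() helper and pass
 * it together with the MLXSW_REG() info to mlxsw_reg_write() (or
 * mlxsw_reg_query() to read it back):
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 *			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
 *			    MLXSW_TRAP_ID_ETHEMAD);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 */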
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port = rx_info->sys_port;
	bool found = false;

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
			    __func__, rx_info->sys_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");