// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
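
/* Editor's illustration (not part of the driver): driver_priv[0] above is a
 * zero-length trailing array, so the per-driver private area is carved out
 * of the same allocation as struct mlxsw_core itself; see the devlink_alloc()
 * call in __mlxsw_core_bus_device_register() below, which sizes the devlink
 * private area as sizeof(*mlxsw_core) + mlxsw_driver->priv_size. A driver
 * built on this core could use it roughly as follows ("example" names are
 * hypothetical):
 *
 *	struct example_priv {
 *		struct mlxsw_core *core;
 *	};
 *
 *	static int example_init(struct mlxsw_core *mlxsw_core,
 *				const struct mlxsw_bus_info *bus_info)
 *	{
 *		struct example_priv *priv = mlxsw_core_driver_priv(mlxsw_core);
 *
 *		priv->core = mlxsw_core; // storage comes zeroed from devlink_alloc()
 *		return 0;
 *	}
 *
 * with .priv_size = sizeof(struct example_priv) in struct mlxsw_driver.
 */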

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
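
/* Editor's note, derived from the item definitions above: a request EMAD is
 * laid out on the wire as
 *
 *	+---------------------------------------+
 *	| Ethernet header (16 bytes)            |  dmac, smac, ethertype 0x8932,
 *	+---------------------------------------+  mlx_proto, version
 *	| Operation TLV (4 u32 = 16 bytes)      |  type 0x1, method, class,
 *	+---------------------------------------+  status, register ID, 64-bit TID
 *	| Register TLV (1-u32 header + payload) |  type 0x3, len in u32
 *	+---------------------------------------+
 *	| End TLV (1 u32 = 4 bytes)             |  type 0x0
 *	+---------------------------------------+
 *
 * The helpers below build and parse exactly this layout.
 */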

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
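
/* Editor's note on the pointer arithmetic above: mlxsw_emad_construct()
 * skb_push()es the pieces back to front (end TLV, register TLV, operation
 * TLV, Ethernet header), yielding the layout sketched earlier. With a
 * 16-byte Ethernet header and a 4-u32 operation TLV, the operation TLV
 * starts at byte 16 of the frame and the register TLV at byte 32;
 * mlxsw_emad_reg_payload() skips (MLXSW_EMAD_OP_TLV_LEN + 1) u32s past the
 * operation TLV pointer, i.e. the operation TLV itself plus the one-u32
 * register TLV header, landing on the first byte of the register payload.
 */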

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS 200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
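
/* Editor's note: each attempt is given MLXSW_EMAD_TIMEOUT_MS (200 ms) before
 * the timeout work fires, and a transaction is retransmitted up to
 * MLXSW_EMAD_MAX_RETRY times (defined in emad.h; 5 at the time of writing).
 * A transaction that never gets a response therefore fails with -EIO after
 * roughly (MLXSW_EMAD_MAX_RETRY + 1) * 200 ms of waiting.
 */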

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
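
/* Editor's note: trans->active is set to 1 before each transmission, and
 * both completion paths (the response handler and the timeout work above)
 * run atomic_dec_and_test() on it, so exactly one of them wins the race and
 * goes on to finish or retry the transaction; the loser simply returns.
 */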

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
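
/* Editor's note on the sizing in mlxsw_emad_alloc(): the frame must hold the
 * register payload (reg_len), the one-u32 register TLV header, the Ethernet
 * header, the operation and end TLVs, and the bus-specific TX header, i.e.
 *
 *	emad_len = reg_len + 4 + MLXSW_EMAD_ETH_HDR_LEN
 *		   + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * 4
 *		   + txhdr_len
 *
 * skb_reserve(skb, emad_len) then moves the data pointer to the tail of the
 * buffer so the construction code can skb_push() the pieces back to front.
 */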

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
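
/* Usage sketch (editor's illustration; struct mlxsw_driver is defined in
 * core.h and only the fields this file relies on are shown). A switch driver
 * registers itself with the core like so, with "example" standing in for a
 * real device kind string matched against bus_info->device_kind:
 *
 *	static struct mlxsw_driver example_driver = {
 *		.kind		= "example",
 *		.priv_size	= sizeof(struct example_priv),
 *		.init		= example_init,
 *		.fini		= example_fini,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return mlxsw_core_driver_register(&example_driver);
 *	}
 */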

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	mlxsw_core->reload_fail = !!err;

	return err;
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};

static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	if (mlxsw_driver->params_register && !reload) {
		err = mlxsw_driver->params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (mlxsw_driver->params_unregister && !reload)
		mlxsw_driver->params_unregister(mlxsw_core);
err_register_params:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	bool called_again = false;
	int err;

again:
	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
					       bus_priv, reload, devlink);
	/* -EAGAIN is returned in case the FW was updated. FW needs
	 * a reset, so let's try to call __mlxsw_core_bus_device_register()
	 * again.
	 */
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
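
/* Usage sketch (editor's illustration): a bus driver (such as the PCI glue)
 * registers a device with the core from its probe routine, passing its bus
 * implementation and private context. On initial probe, reload is false and
 * the devlink instance is allocated internally, so NULL is passed:
 *
 *	err = mlxsw_core_bus_device_register(&bus_info, &example_bus,
 *					     bus_priv, false, NULL);
 *
 * "example_bus" and "bus_priv" stand in for the caller's struct mlxsw_bus
 * and its private data.
 */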

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (mlxsw_core->driver->params_unregister && !reload)
		mlxsw_core->driver->params_unregister(mlxsw_core);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);

	return;

reload_fail_deinit:
	if (mlxsw_core->driver->params_unregister)
		mlxsw_core->driver->params_unregister(mlxsw_core);
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
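
/* Usage sketch (editor's illustration): a consumer registers an RX listener
 * by describing the trap it wants and a handler; MLXSW_PORT_DONT_CARE makes
 * the match ignore the ingress port, as the event code below does:
 *
 *	static const struct mlxsw_rx_listener example_rxl = {
 *		.func		= example_rx_func,
 *		.local_port	= MLXSW_PORT_DONT_CARE,
 *		.trap_id	= MLXSW_TRAP_ID_ETHEMAD,
 *	};
 *
 *	err = mlxsw_core_rx_listener_register(mlxsw_core, &example_rxl, priv);
 *
 * The handler takes ownership of the skb and must eventually free it.
 */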

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
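
/* Usage sketch (editor's illustration): mlxsw_core_trap_register() above
 * pairs listener registration with an HPKT write that actually steers the
 * trap to the CPU. The EMAD listener earlier in this file is registered in
 * exactly this way:
 *
 *	static const struct mlxsw_listener example_listener =
 *		MLXSW_RXL(example_rx_func, ETHEMAD, TRAP_TO_CPU, false,
 *			  EMAD, DISCARD);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &example_listener, priv);
 *
 * mlxsw_core_trap_unregister() restores the unreg_action (DISCARD here)
 * before dropping the listener.
 */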

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
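
/* Usage sketch (editor's illustration; the hpkt arguments here are
 * illustrative only): callers may batch several asynchronous register
 * transactions on one list and wait for all of them at once.
 * mlxsw_core_reg_access() below uses the same pattern for a single
 * transaction:
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *	LIST_HEAD(bulk_list);
 *
 *	mlxsw_reg_hpkt_pack(hpkt_pl, action, trap_id, trap_group, is_ctrl);
 *	err = mlxsw_reg_trans_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl,
 *				    &bulk_list, NULL, 0);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */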

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization the EMAD interface is not available to us,
	 * so we default to the command interface. We switch to the EMAD
	 * interface after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
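
/* Usage sketch (editor's illustration): the synchronous wrappers above are
 * the common way to touch a register; the payload buffer is both input and
 * output for a query, and the auto-generated per-register item helpers are
 * used to parse it:
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 *	if (err)
 *		return err;
 *	(parse hpkt_pl with the mlxsw_reg_hpkt_*_get() helpers)
 */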

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
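
/* Editor's note: the mapping above is a flat array indexed as
 *
 *	index = MAX_LAG_MEMBERS * lag_id + port_index
 *
 * i.e. one row of MAX_LAG_MEMBERS local ports per LAG. For example, with
 * MAX_LAG_MEMBERS of 32, port_index 3 of lag_id 2 lives at index 67.
 */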

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
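
/* Usage sketch (editor's illustration): the expected per-port lifecycle with
 * the API above is
 *
 *	mlxsw_core_port_init(mlxsw_core, local_port);
 *	(create the netdev)
 *	mlxsw_core_port_eth_set(mlxsw_core, local_port, port_priv, dev,
 *				port_number, split, split_port_subnumber);
 *	(port in use)
 *	mlxsw_core_port_clear(mlxsw_core, local_port, port_priv);
 *	mlxsw_core_port_fini(mlxsw_core, local_port);
 *
 * where port_priv is the driver's per-port private data; storing it is what
 * makes mlxsw_core_port_check() treat the port as in use.
 */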

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
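
/* Usage sketch (editor's illustration; "example" names are hypothetical):
 * consumers schedule deferred work on the core's dedicated workqueues rather
 * than the system ones, e.g.
 *
 *	INIT_DELAYED_WORK(&example_dw, example_dw_func);
 *	mlxsw_core_schedule_dw(&example_dw, msecs_to_jiffies(100));
 *
 * mlxsw_wq is allocated WQ_MEM_RECLAIM, and mlxsw_owq is ordered, for work
 * items that must not run concurrently with each other.
 */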

int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");