// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/
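/* The MLXSW_ITEM32/MLXSW_ITEM64/MLXSW_ITEM_BUF macros below come from
 * item.h and generate typed get/set accessors for fields of packed,
 * big-endian buffers. As a rough illustrative reading (see item.h for the
 * authoritative definition), MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7)
 * emits mlxsw_emad_op_tlv_status_get()/_set() helpers for a 7-bit field at
 * bit offset 8 of the 32-bit word at byte offset 0x00, i.e. roughly:
 *
 *	u32 word = be32_to_cpu(*(__be32 *) (op_tlv + 0x00));
 *	u8 status = (word >> 8) & GENMASK(6, 0);
 */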
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
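/* Since the TLVs are pushed front-to-back (end TLV first, Ethernet header
 * last), the finished request lays out on the wire as follows. This is an
 * illustrative summary of the construction above, not an additional spec:
 *
 *	+--------------------------------------+
 *	| Ethernet header (dmac, smac, 0x8932) |
 *	+--------------------------------------+
 *	| op TLV (method, register_id, tid)    |
 *	+--------------------------------------+
 *	| reg TLV header | register payload    |
 *	+--------------------------------------+
 *	| end TLV                              |
 *	+--------------------------------------+
 */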
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
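/* Usage sketch (illustrative; my_driver, my_priv, my_init and my_fini are
 * hypothetical names, not part of this file): an ASIC-specific driver such
 * as spectrum or switchx2 registers its mlxsw_driver once at module init
 * and unregisters it on exit:
 *
 *	static struct mlxsw_driver my_driver = {
 *		.kind		= "my_asic",
 *		.priv_size	= sizeof(struct my_priv),
 *		.init		= my_init,
 *		.fini		= my_fini,
 *		.txhdr_len	= MLXSW_TXHDR_LEN,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return mlxsw_core_driver_register(&my_driver);
 *	}
 *
 * mlxsw_core_bus_device_register() below later matches a probed bus device
 * to this driver by the "kind" string.
 */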
static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	mlxsw_core->reload_fail = !!err;

	return err;
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
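/* Usage sketch (illustrative; the mlxsw_pci names are paraphrased from the
 * PCI bus driver and may differ): a bus driver calls this from its probe
 * path once the bus can describe itself:
 *
 *	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
 *					     &mlxsw_pci_bus, mlxsw_pci,
 *					     false, NULL);
 *
 * With reload=false a fresh devlink instance is allocated; the devlink
 * reload path above re-enters with reload=true to reuse the existing one.
 */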
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);

	return;

reload_fail_deinit:
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
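/* Usage sketch (illustrative; my_rx_func and the trap ID choice are
 * hypothetical): a consumer provides a callback keyed by trap ID with an
 * optional local port filter, then registers it. The callback takes
 * ownership of the skb:
 *
 *	static void my_rx_func(struct sk_buff *skb, u8 local_port, void *priv)
 *	{
 *		... consume and eventually free skb ...
 *	}
 *
 *	struct mlxsw_rx_listener rxl = {
 *		.func		= my_rx_func,
 *		.local_port	= MLXSW_PORT_DONT_CARE,
 *		.trap_id	= MLXSW_TRAP_ID_ETHEMAD,
 *	};
 *	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, priv);
 *
 * The same (rxl, priv) pair must later be passed to
 * mlxsw_core_rx_listener_unregister().
 */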
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
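/* Usage sketch (illustrative; my_rx_func is a hypothetical callback):
 * mlxsw_emad_init() above is the in-file example of this API. A listener is
 * typically declared with the MLXSW_RXL() helper, which bundles the
 * callback, trap ID, trap action and trap group, and is then both
 * registered with the core and programmed into the device (via the HPKT
 * register) in one call:
 *
 *	static const struct mlxsw_listener my_listener =
 *		MLXSW_RXL(my_rx_func, ETHEMAD, TRAP_TO_CPU, false,
 *			  EMAD, DISCARD);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &my_listener, priv);
 */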
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
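/* Usage sketch (illustrative; the register choice, my_cb and ctx are
 * hypothetical): callers may queue several asynchronous transactions on one
 * bulk list and then block once for all of them:
 *
 *	LIST_HEAD(bulk_list);
 *
 *	for (i = 0; i < n; i++) {
 *		mlxsw_reg_ppcnt_pack(ppcnt_pl, ports[i], ...);
 *		err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(ppcnt),
 *					    ppcnt_pl, &bulk_list,
 *					    my_cb, (unsigned long) ctx);
 *		if (err)
 *			break;
 *	}
 *	err = mlxsw_reg_trans_bulk_wait(&bulk_list);
 *
 * The wait returns the first error seen and frees every transaction on the
 * list, whether or not it succeeded.
 */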
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
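/* Usage sketch (illustrative; PMLP is just one example register): the
 * synchronous wrappers pair a pack helper from reg.h with a query or write,
 * and on query the payload buffer is overwritten with the response:
 *
 *	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *
 *	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 *	if (!err)
 *		width = mlxsw_reg_pmlp_width_get(pmlp_pl);
 */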
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
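/* Worked example (illustrative numbers): the LAG mapping is a flat
 * [MAX_LAG x MAX_LAG_MEMBERS] byte array indexed row-major by lag_id. With
 * MAX_LAG_MEMBERS = 16, the entry for lag_id = 2, port_index = 5 lives at
 * index 16 * 2 + 5 = 37 and holds the u8 local_port of that LAG member.
 */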
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
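/* Lifecycle sketch (illustrative ordering, as used by the port drivers):
 *
 *	mlxsw_core_port_init(mlxsw_core, local_port);	register devlink port
 *	... driver creates its netdev ...
 *	mlxsw_core_port_eth_set(mlxsw_core, local_port,
 *				port_priv, dev, ...);	mark it as Ethernet
 *	...
 *	mlxsw_core_port_clear(mlxsw_core, local_port,
 *			      port_priv);		detach type on teardown
 *	mlxsw_core_port_fini(mlxsw_core, local_port);	unregister devlink port
 */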
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");