// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
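
/* On the wire, an EMAD built by mlxsw_emad_construct() below looks like:
 *
 *   Ethernet header | op TLV | reg TLV (header + register payload) | end TLV
 *
 * The TLVs are pushed in reverse order (end, reg, op), so the frame is
 * assembled back-to-front. All TLV lengths are counted in u32 units,
 * matching the item definitions above.
 */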

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
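
/* Parsing helpers for received EMADs. The offsets are fixed: the op TLV
 * immediately follows the Ethernet header, the reg TLV follows the op TLV,
 * and the register payload starts one u32 (the reg TLV header) into the
 * reg TLV.
 */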

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
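
/* trans->active is set to 1 on every (re)transmission. The response path
 * and the timeout worker both do atomic_dec_and_test() on it, so exactly
 * one of them gets to retry or finish a given transaction; a late response
 * cannot race with a concurrently firing timeout.
 */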

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
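
/* Transaction IDs combine a random upper 32 bits, chosen once in
 * mlxsw_emad_init(), with a lower part that mlxsw_core_tid_get()
 * increments for every transaction. Responses whose TID matches no
 * pending transaction are simply dropped by the RX listener above.
 */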

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
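
/* The EMAD skb is allocated to the exact size of this EMAD (including the
 * bus tx header) and fully reserved as headroom; mlxsw_emad_construct()
 * then skb_push()es the TLVs and the Ethernet header, growing the frame
 * backwards from the end of the buffer.
 */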

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
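
/* devlink op glue: each callback below validates its arguments, checks
 * that the driver implements the corresponding operation and forwards
 * the call, returning -EOPNOTSUPP otherwise.
 */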

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port,
			    devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
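
/* devlink reload tears the device down and brings it back up on the same
 * devlink instance: both calls below pass reload=true, so the devlink
 * registration and the registered resources survive the cycle.
 */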

static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	if (err)
		mlxsw_core->reload_fail = true;
	return err;
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
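
/* Bring-up order: bus init, driver resources, ports, LAG mapping, EMAD,
 * devlink registration, hwmon, thermal and finally the driver itself
 * (devlink and resource registration are skipped on reload). The error
 * path of mlxsw_core_bus_device_register() unwinds these in exactly the
 * reverse order.
 */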

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail)
		goto reload_fail;

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (reload)
		return;
reload_fail:
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
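
/* RX listeners live on an RCU-protected list: the data path in
 * mlxsw_core_skb_receive() walks it under rcu_read_lock(), while
 * registration and unregistration use the _rcu list helpers and
 * synchronize_rcu() before freeing an item.
 */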

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
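
/* Event listeners are layered on top of RX listeners: the trap delivers
 * an EMAD, whose op and reg TLVs are decoded here so that only the bare
 * register payload is handed to the driver's event callback.
 */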

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}
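
/* Registering a trap couples two steps: adding the listener and
 * programming the trap action through the HPKT register. On
 * unregistration, HPKT is first reprogrammed with the listener's
 * unreg_action (event listeners skip this step) and the listener is
 * removed afterwards.
 */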

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
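
/* A typical caller batches transactions on a local list and then waits
 * for all of them at once; a minimal sketch, mirroring what
 * mlxsw_core_reg_access() below does:
 *
 *	LIST_HEAD(bulk_list);
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, reg, payload,
 *				    &bulk_list, cb, cb_priv);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */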

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
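
/* The LAG mapping is a flat (lag_id, port_index) -> local_port array;
 * the entry for a given pair lives at
 * lag_id * MAX_LAG_MEMBERS + port_index.
 */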

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
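
/* Debug dump of a command mailbox: trailing all-zero words are trimmed
 * (at least one word is always printed) and the remainder is printed
 * four u32s per line.
 */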

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
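
/* Two module-wide workqueues: mlxsw_wq backs mlxsw_core_schedule_dw(),
 * while mlxsw_owq is an ordered queue for work items that must not run
 * concurrently, used via mlxsw_core_schedule_work() and flushed by
 * mlxsw_core_flush_owq().
 */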

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");