// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
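
/* Each listener registered with the core is wrapped in one of the item
 * structures below, pairing the listener with its registration-time private
 * pointer and linking it into the corresponding RCU-protected list in
 * struct mlxsw_core.
 */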

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
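
/* On the wire an EMAD is laid out as follows (see mlxsw_emad_construct()):
 * Ethernet header, operation TLV, register TLV (one header word followed by
 * the register payload) and the end TLV.
 */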

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
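
/* The skb handed to mlxsw_emad_construct() was allocated with tailroom only
 * (see mlxsw_emad_alloc()), so the frame is built back to front: the end TLV
 * is pushed first, then the register TLV, the operation TLV and finally the
 * Ethernet header.
 */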

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
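
/* A transaction is completed either by the response handler or by the timeout
 * work below. Both paths race on trans->active; whichever decrements it to
 * zero owns the transaction and drives it to retry or completion.
 */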

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
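
/* EMAD responses arrive via a regular RX trap (ETHEMAD). When the listener is
 * unregistered, the trap action is changed to DISCARD so the device stops
 * delivering EMADs to the CPU.
 */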

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
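
/* An EMAD skb must accommodate the Ethernet header, the operation and end
 * TLVs, the register TLV header word plus the register payload, and the bus
 * specific TX header. Registers too large to fit in one frame cannot be
 * accessed through EMAD.
 */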

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}
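
/* struct mlxsw_core_port embeds its devlink_port, so the container can be
 * recovered from the devlink_port pointer that devlink callbacks hand back.
 */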

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	if (err)
		mlxsw_core->reload_fail = true;
	return err;
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
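
/* Device bring-up order: bus init (which may also query resources), driver
 * resource registration, port array and LAG mapping allocation, EMAD setup,
 * devlink registration, hwmon/thermal init and finally the driver's own init.
 * On devlink reload, devlink and resource registration are skipped, as both
 * survive the reload.
 */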

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail)
		goto reload_fail;

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (reload)
		return;
reload_fail:
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
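
/* Listeners are compared on all identifying fields plus the registration-time
 * private pointer, so the same function may be registered multiple times with
 * different priv values without clashing.
 */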

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
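
/* The lower 32 bits of the transaction ID are an incrementing counter; the
 * upper 32 bits were randomized in mlxsw_emad_init() so that responses meant
 * for other devices can be told apart.
 */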

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
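
/* A minimal usage sketch of the transaction API above, with hypothetical
 * register (MLXSW_REG(foo), MLXSW_REG(bar)), payload and callback names:
 *
 *	LIST_HEAD(bulk_list);
 *	int err;
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(foo), foo_pl,
 *				    &bulk_list, foo_done_cb,
 *				    (unsigned long) ctx);
 *	if (err)
 *		return err;
 *	err = mlxsw_reg_trans_write(mlxsw_core, MLXSW_REG(bar), bar_pl,
 *				    &bulk_list, NULL, 0);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */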

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
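
/* Entry point for packets delivered by the bus. The ingress local port is
 * resolved (through the LAG mapping when the packet arrived on a LAG) and the
 * skb is handed to the first matching RX listener, or dropped if none
 * matches.
 */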

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
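
/* The LAG mapping is a flat array of local ports indexed by
 * lag_id * MAX_LAG_MEMBERS + port_index.
 */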

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
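
/* Dump a mailbox to the debug log, 16 bytes per line, stopping after the last
 * non-zero 32-bit word but always printing at least one line.
 */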

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
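
/* Two module-wide workqueues are kept: mlxsw_wq, used for delayed work
 * scheduled via mlxsw_core_schedule_dw(), and mlxsw_owq, an ordered queue for
 * work that must not run concurrently, scheduled via
 * mlxsw_core_schedule_work() and flushable via mlxsw_core_flush_owq().
 */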

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");