/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
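/* Usage sketch (illustrative; "my_switch" is a placeholder, not a real
 * driver type): a switch driver embeds its private state in the trailing
 * driver_priv[] area, which is sized by mlxsw_driver->priv_size at
 * allocation time and retrieved via mlxsw_core_driver_priv():
 *
 *	struct my_switch *my = mlxsw_core_driver_priv(mlxsw_core);
 */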
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
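/* The MLXSW_ITEM32()/MLXSW_ITEM64()/MLXSW_ITEM_BUF() helpers above
 * generate mlxsw_emad_<field>_get()/_set() accessors over a raw buffer.
 * For example, MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5) yields
 * accessors for a 5-bit field starting at bit 27 of the u32 at byte
 * offset 0x00 of the TLV.
 */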
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
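/* Resulting layout of the EMAD frame built back-to-front by
 * mlxsw_emad_construct() above (a sketch; lengths in u32 units unless
 * noted otherwise):
 *
 *	+-------------------------------------------+
 *	| Ethernet header (MLXSW_EMAD_ETH_HDR_LEN B)|
 *	+-------------------------------------------+
 *	| Operation TLV (MLXSW_EMAD_OP_TLV_LEN)     |
 *	+-------------------------------------------+
 *	| Register TLV header (1) + register payload|
 *	+-------------------------------------------+
 *	| End TLV (MLXSW_EMAD_END_TLV_LEN)          |
 *	+-------------------------------------------+
 */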
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
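/* trans->active arbitrates between the response path and the timeout
 * work below: whichever of the two moves it from 1 to 0 via
 * atomic_dec_and_test() owns the transaction and is the only one
 * allowed to retry or finish it.
 */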
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
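/* A transaction ID thus combines a per-device random 32-bit prefix (set
 * once in mlxsw_emad_init() below) with a running counter bumped by
 * mlxsw_core_tid_get(), e.g. (illustrative value only):
 *
 *	tid = 0x13f9a2c700000007 - random prefix | seventh transaction
 */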
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}
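/* The shared-buffer and occupancy ops below all follow the same
 * delegation pattern: check that the underlying driver implements the
 * hook (and, for per-port ops, that the port is in use), then forward
 * the call; otherwise return -EOPNOTSUPP.
 */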
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
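/* Usage sketch (illustrative; bus_info, bus_ops and bus_priv stand for
 * the caller's own structures): a bus driver such as mlxsw_pci registers
 * a probed device with the core roughly like so:
 *
 *	err = mlxsw_core_bus_device_register(&bus_info, &bus_ops, bus_priv);
 *	if (err)
 *		goto err_core_register;
 */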
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
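/* The listener lists are consumed under RCU on the RX path (see
 * mlxsw_core_skb_receive()), which is why registration uses
 * list_add_rcu() above and unregistration below must synchronize_rcu()
 * before freeing the item.
 */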
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						     &listener->u.event_listener,
						     priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
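/* Usage sketch (illustrative; the function, trap and group names are
 * placeholders): drivers typically define listeners with the MLXSW_RXL()
 * helper, as done for mlxsw_emad_rx_listener above, and register them as
 * traps:
 *
 *	static const struct mlxsw_listener my_listener =
 *		MLXSW_RXL(my_rx_func, MY_TRAP, TRAP_TO_CPU, false,
 *			  MY_GROUP, DISCARD);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &my_listener, priv);
 */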
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
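/* Usage sketch (illustrative): callers may batch several asynchronous
 * register transactions on one bulk list and wait for all of them at
 * once; mlxsw_core_reg_access() below uses the same pattern for a single
 * transaction:
 *
 *	LIST_HEAD(bulk_list);
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, reg, payload,
 *				    &bulk_list, cb, cb_priv);
 *	...
 *	err = mlxsw_reg_trans_bulk_wait(&bulk_list);
 */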
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
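/* Usage sketch (illustrative; "some" stands for a real register name
 * from reg.h): synchronous register access with a packed payload buffer:
 *
 *	char some_pl[MLXSW_REG_SOME_LEN];
 *
 *	mlxsw_reg_some_pack(some_pl, ...);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(some), some_pl);
 */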
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
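/* The LAG mapping above is a flat, lag_id-major array; a sketch of the
 * lookup performed by the helpers:
 *
 *	index = lag_id * MAX_LAG_MEMBERS + port_index;
 *	local_port = mlxsw_core->lag.mapping[index];
 */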
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
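/* Port lifecycle, as used by drivers: mlxsw_core_port_init() registers
 * the devlink port, mlxsw_core_port_eth_set() / mlxsw_core_port_ib_set()
 * bind it to a driver-private object and set its type,
 * mlxsw_core_port_clear() clears the type again, and
 * mlxsw_core_port_fini() unregisters it.
 */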
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");