1 /* 2 * Copyright(c) 2015-2017 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <linux/net.h> 49 #include <rdma/opa_addr.h> 50 #define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \ 51 / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16))) 52 53 #include "hfi.h" 54 #include "mad.h" 55 #include "trace.h" 56 #include "qp.h" 57 #include "vnic.h" 58 59 /* the reset value from the FM is supposed to be 0xffff, handle both */ 60 #define OPA_LINK_WIDTH_RESET_OLD 0x0fff 61 #define OPA_LINK_WIDTH_RESET 0xffff 62 63 struct trap_node { 64 struct list_head list; 65 struct opa_mad_notice_attr data; 66 __be64 tid; 67 int len; 68 u32 retry; 69 u8 in_use; 70 u8 repress; 71 }; 72 73 static int smp_length_check(u32 data_size, u32 request_len) 74 { 75 if (unlikely(request_len < data_size)) 76 return -EINVAL; 77 78 return 0; 79 } 80 81 static int reply(struct ib_mad_hdr *smp) 82 { 83 /* 84 * The verbs framework will handle the directed/LID route 85 * packet changes. 
86 */ 87 smp->method = IB_MGMT_METHOD_GET_RESP; 88 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 89 smp->status |= IB_SMP_DIRECTION; 90 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 91 } 92 93 static inline void clear_opa_smp_data(struct opa_smp *smp) 94 { 95 void *data = opa_get_smp_data(smp); 96 size_t size = opa_get_smp_data_size(smp); 97 98 memset(data, 0, size); 99 } 100 101 static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx) 102 { 103 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 104 105 if (pkey_idx < ARRAY_SIZE(ppd->pkeys)) 106 return ppd->pkeys[pkey_idx]; 107 108 return 0; 109 } 110 111 void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) 112 { 113 struct ib_event event; 114 115 event.event = IB_EVENT_PKEY_CHANGE; 116 event.device = &dd->verbs_dev.rdi.ibdev; 117 event.element.port_num = port; 118 ib_dispatch_event(&event); 119 } 120 121 /* 122 * If the port is down, clean up all pending traps. We need to be careful 123 * with the given trap, because it may be queued. 124 */ 125 static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap) 126 { 127 struct trap_node *node, *q; 128 unsigned long flags; 129 struct list_head trap_list; 130 int i; 131 132 for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) { 133 spin_lock_irqsave(&ibp->rvp.lock, flags); 134 list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list); 135 ibp->rvp.trap_lists[i].list_len = 0; 136 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 137 138 /* 139 * Remove all items from the list, freeing all the non-given 140 * traps. 141 */ 142 list_for_each_entry_safe(node, q, &trap_list, list) { 143 list_del(&node->list); 144 if (node != trap) 145 kfree(node); 146 } 147 } 148 149 /* 150 * If this wasn't on one of the lists it would not be freed. If it 151 * was on the list, it is now safe to free. 
152 */ 153 kfree(trap); 154 } 155 156 static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp, 157 struct trap_node *trap) 158 { 159 struct trap_node *node; 160 struct trap_list *trap_list; 161 unsigned long flags; 162 unsigned long timeout; 163 int found = 0; 164 unsigned int queue_id; 165 static int trap_count; 166 167 queue_id = trap->data.generic_type & 0x0F; 168 if (queue_id >= RVT_MAX_TRAP_LISTS) { 169 trap_count++; 170 pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n", 171 trap->data.generic_type, trap_count); 172 kfree(trap); 173 return NULL; 174 } 175 176 /* 177 * Since the retry (handle timeout) does not remove a trap request 178 * from the list, all we have to do is compare the node. 179 */ 180 spin_lock_irqsave(&ibp->rvp.lock, flags); 181 trap_list = &ibp->rvp.trap_lists[queue_id]; 182 183 list_for_each_entry(node, &trap_list->list, list) { 184 if (node == trap) { 185 node->retry++; 186 found = 1; 187 break; 188 } 189 } 190 191 /* If it is not on the list, add it, limited to RVT-MAX_TRAP_LEN. */ 192 if (!found) { 193 if (trap_list->list_len < RVT_MAX_TRAP_LEN) { 194 trap_list->list_len++; 195 list_add_tail(&trap->list, &trap_list->list); 196 } else { 197 pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n", 198 trap->data.generic_type); 199 kfree(trap); 200 } 201 } 202 203 /* 204 * Next check to see if there is a timer pending. If not, set it up 205 * and get the first trap from the list. 206 */ 207 node = NULL; 208 if (!timer_pending(&ibp->rvp.trap_timer)) { 209 /* 210 * o14-2 211 * If the time out is set we have to wait until it expires 212 * before the trap can be sent. 
213 * This should be > RVT_TRAP_TIMEOUT 214 */ 215 timeout = (RVT_TRAP_TIMEOUT * 216 (1UL << ibp->rvp.subnet_timeout)) / 1000; 217 mod_timer(&ibp->rvp.trap_timer, 218 jiffies + usecs_to_jiffies(timeout)); 219 node = list_first_entry(&trap_list->list, struct trap_node, 220 list); 221 node->in_use = 1; 222 } 223 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 224 225 return node; 226 } 227 228 static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp, 229 struct opa_smp *smp) 230 { 231 struct trap_list *trap_list; 232 struct trap_node *trap; 233 unsigned long flags; 234 int i; 235 236 if (smp->attr_id != IB_SMP_ATTR_NOTICE) 237 return; 238 239 spin_lock_irqsave(&ibp->rvp.lock, flags); 240 for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) { 241 trap_list = &ibp->rvp.trap_lists[i]; 242 trap = list_first_entry_or_null(&trap_list->list, 243 struct trap_node, list); 244 if (trap && trap->tid == smp->tid) { 245 if (trap->in_use) { 246 trap->repress = 1; 247 } else { 248 trap_list->list_len--; 249 list_del(&trap->list); 250 kfree(trap); 251 } 252 break; 253 } 254 } 255 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 256 } 257 258 static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp, 259 struct rdma_ah_attr *attr, u32 dlid) 260 { 261 rdma_ah_set_dlid(attr, dlid); 262 rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port); 263 if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { 264 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 265 266 rdma_ah_set_ah_flags(attr, IB_AH_GRH); 267 grh->sgid_index = 0; 268 grh->hop_limit = 1; 269 grh->dgid.global.subnet_prefix = 270 ibp->rvp.gid_prefix; 271 grh->dgid.global.interface_id = OPA_MAKE_ID(dlid); 272 } 273 } 274 275 static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp, 276 struct rvt_ah *ah, u32 dlid) 277 { 278 struct rdma_ah_attr attr; 279 struct rvt_qp *qp0; 280 int ret = -EINVAL; 281 282 memset(&attr, 0, sizeof(attr)); 283 attr.type = ah->ibah.type; 284 hfi1_update_sm_ah_attr(ibp, &attr, dlid); 285 rcu_read_lock(); 286 qp0 
= rcu_dereference(ibp->rvp.qp[0]); 287 if (qp0) 288 ret = rdma_modify_ah(&ah->ibah, &attr); 289 rcu_read_unlock(); 290 return ret; 291 } 292 293 static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid) 294 { 295 struct rdma_ah_attr attr; 296 struct ib_ah *ah = ERR_PTR(-EINVAL); 297 struct rvt_qp *qp0; 298 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 299 struct hfi1_devdata *dd = dd_from_ppd(ppd); 300 u8 port_num = ppd->port; 301 302 memset(&attr, 0, sizeof(attr)); 303 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num); 304 hfi1_update_sm_ah_attr(ibp, &attr, dlid); 305 rcu_read_lock(); 306 qp0 = rcu_dereference(ibp->rvp.qp[0]); 307 if (qp0) 308 ah = rdma_create_ah(qp0->ibqp.pd, &attr); 309 rcu_read_unlock(); 310 return ah; 311 } 312 313 static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap) 314 { 315 struct ib_mad_send_buf *send_buf; 316 struct ib_mad_agent *agent; 317 struct opa_smp *smp; 318 unsigned long flags; 319 int pkey_idx; 320 u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp; 321 322 agent = ibp->rvp.send_agent; 323 if (!agent) { 324 cleanup_traps(ibp, trap); 325 return; 326 } 327 328 /* o14-3.2.1 */ 329 if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) { 330 cleanup_traps(ibp, trap); 331 return; 332 } 333 334 /* Add the trap to the list if necessary and see if we can send it */ 335 trap = check_and_add_trap(ibp, trap); 336 if (!trap) 337 return; 338 339 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); 340 if (pkey_idx < 0) { 341 pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n", 342 __func__, hfi1_get_pkey(ibp, 1)); 343 pkey_idx = 1; 344 } 345 346 send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0, 347 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 348 GFP_ATOMIC, IB_MGMT_BASE_VERSION); 349 if (IS_ERR(send_buf)) 350 return; 351 352 smp = send_buf->mad; 353 smp->base_version = OPA_MGMT_BASE_VERSION; 354 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 355 smp->class_version = OPA_SM_CLASS_VERSION; 
356 smp->method = IB_MGMT_METHOD_TRAP; 357 358 /* Only update the transaction ID for new traps (o13-5). */ 359 if (trap->tid == 0) { 360 ibp->rvp.tid++; 361 /* make sure that tid != 0 */ 362 if (ibp->rvp.tid == 0) 363 ibp->rvp.tid++; 364 trap->tid = cpu_to_be64(ibp->rvp.tid); 365 } 366 smp->tid = trap->tid; 367 368 smp->attr_id = IB_SMP_ATTR_NOTICE; 369 /* o14-1: smp->mkey = 0; */ 370 371 memcpy(smp->route.lid.data, &trap->data, trap->len); 372 373 spin_lock_irqsave(&ibp->rvp.lock, flags); 374 if (!ibp->rvp.sm_ah) { 375 if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { 376 struct ib_ah *ah; 377 378 ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid); 379 if (IS_ERR(ah)) { 380 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 381 return; 382 } 383 send_buf->ah = ah; 384 ibp->rvp.sm_ah = ibah_to_rvtah(ah); 385 } else { 386 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 387 return; 388 } 389 } else { 390 send_buf->ah = &ibp->rvp.sm_ah->ibah; 391 } 392 393 /* 394 * If the trap was repressed while things were getting set up, don't 395 * bother sending it. This could happen for a retry. 
396 */ 397 if (trap->repress) { 398 list_del(&trap->list); 399 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 400 kfree(trap); 401 ib_free_send_mad(send_buf); 402 return; 403 } 404 405 trap->in_use = 0; 406 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 407 408 if (ib_post_send_mad(send_buf, NULL)) 409 ib_free_send_mad(send_buf); 410 } 411 412 void hfi1_handle_trap_timer(struct timer_list *t) 413 { 414 struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer); 415 struct trap_node *trap = NULL; 416 unsigned long flags; 417 int i; 418 419 /* Find the trap with the highest priority */ 420 spin_lock_irqsave(&ibp->rvp.lock, flags); 421 for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) { 422 trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list, 423 struct trap_node, list); 424 } 425 spin_unlock_irqrestore(&ibp->rvp.lock, flags); 426 427 if (trap) 428 send_trap(ibp, trap); 429 } 430 431 static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid) 432 { 433 struct trap_node *trap; 434 435 trap = kzalloc(sizeof(*trap), GFP_ATOMIC); 436 if (!trap) 437 return NULL; 438 439 INIT_LIST_HEAD(&trap->list); 440 trap->data.generic_type = type; 441 trap->data.prod_type_lsb = IB_NOTICE_PROD_CA; 442 trap->data.trap_num = trap_num; 443 trap->data.issuer_lid = cpu_to_be32(lid); 444 445 return trap; 446 } 447 448 /* 449 * Send a bad P_Key trap (ch. 14.3.8). 
450 */ 451 void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, 452 u32 qp1, u32 qp2, u32 lid1, u32 lid2) 453 { 454 struct trap_node *trap; 455 u32 lid = ppd_from_ibp(ibp)->lid; 456 457 ibp->rvp.n_pkt_drops++; 458 ibp->rvp.pkey_violations++; 459 460 trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY, 461 lid); 462 if (!trap) 463 return; 464 465 /* Send violation trap */ 466 trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1); 467 trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2); 468 trap->data.ntc_257_258.key = cpu_to_be32(key); 469 trap->data.ntc_257_258.sl = sl << 3; 470 trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1); 471 trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2); 472 473 trap->len = sizeof(trap->data); 474 send_trap(ibp, trap); 475 } 476 477 /* 478 * Send a bad M_Key trap (ch. 14.3.9). 479 */ 480 static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, 481 __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt) 482 { 483 struct trap_node *trap; 484 u32 lid = ppd_from_ibp(ibp)->lid; 485 486 trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY, 487 lid); 488 if (!trap) 489 return; 490 491 /* Send violation trap */ 492 trap->data.ntc_256.lid = trap->data.issuer_lid; 493 trap->data.ntc_256.method = mad->method; 494 trap->data.ntc_256.attr_id = mad->attr_id; 495 trap->data.ntc_256.attr_mod = mad->attr_mod; 496 trap->data.ntc_256.mkey = mkey; 497 if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 498 trap->data.ntc_256.dr_slid = dr_slid; 499 trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; 500 if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) { 501 trap->data.ntc_256.dr_trunc_hop |= 502 IB_NOTICE_TRAP_DR_TRUNC; 503 hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path); 504 } 505 trap->data.ntc_256.dr_trunc_hop |= hop_cnt; 506 memcpy(trap->data.ntc_256.dr_rtn_path, return_path, 507 hop_cnt); 508 } 509 510 trap->len = sizeof(trap->data); 511 512 send_trap(ibp, trap); 513 } 514 515 /* 516 * 
Send a Port Capability Mask Changed trap (ch. 14.3.11). 517 */ 518 void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) 519 { 520 struct trap_node *trap; 521 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 522 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); 523 struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data; 524 u32 lid = ppd_from_ibp(ibp)->lid; 525 526 trap = create_trap_node(IB_NOTICE_TYPE_INFO, 527 OPA_TRAP_CHANGE_CAPABILITY, 528 lid); 529 if (!trap) 530 return; 531 532 trap->data.ntc_144.lid = trap->data.issuer_lid; 533 trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); 534 trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags); 535 536 trap->len = sizeof(trap->data); 537 send_trap(ibp, trap); 538 } 539 540 /* 541 * Send a System Image GUID Changed trap (ch. 14.3.12). 542 */ 543 void hfi1_sys_guid_chg(struct hfi1_ibport *ibp) 544 { 545 struct trap_node *trap; 546 u32 lid = ppd_from_ibp(ibp)->lid; 547 548 trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID, 549 lid); 550 if (!trap) 551 return; 552 553 trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid; 554 trap->data.ntc_145.lid = trap->data.issuer_lid; 555 556 trap->len = sizeof(trap->data); 557 send_trap(ibp, trap); 558 } 559 560 /* 561 * Send a Node Description Changed trap (ch. 14.3.13). 
562 */ 563 void hfi1_node_desc_chg(struct hfi1_ibport *ibp) 564 { 565 struct trap_node *trap; 566 u32 lid = ppd_from_ibp(ibp)->lid; 567 568 trap = create_trap_node(IB_NOTICE_TYPE_INFO, 569 OPA_TRAP_CHANGE_CAPABILITY, 570 lid); 571 if (!trap) 572 return; 573 574 trap->data.ntc_144.lid = trap->data.issuer_lid; 575 trap->data.ntc_144.change_flags = 576 cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG); 577 578 trap->len = sizeof(trap->data); 579 send_trap(ibp, trap); 580 } 581 582 static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am, 583 u8 *data, struct ib_device *ibdev, 584 u8 port, u32 *resp_len, u32 max_len) 585 { 586 struct opa_node_description *nd; 587 588 if (am || smp_length_check(sizeof(*nd), max_len)) { 589 smp->status |= IB_SMP_INVALID_FIELD; 590 return reply((struct ib_mad_hdr *)smp); 591 } 592 593 nd = (struct opa_node_description *)data; 594 595 memcpy(nd->data, ibdev->node_desc, sizeof(nd->data)); 596 597 if (resp_len) 598 *resp_len += sizeof(*nd); 599 600 return reply((struct ib_mad_hdr *)smp); 601 } 602 603 static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, 604 struct ib_device *ibdev, u8 port, 605 u32 *resp_len, u32 max_len) 606 { 607 struct opa_node_info *ni; 608 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 609 unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */ 610 611 ni = (struct opa_node_info *)data; 612 613 /* GUID 0 is illegal */ 614 if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 || 615 smp_length_check(sizeof(*ni), max_len) || 616 get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { 617 smp->status |= IB_SMP_INVALID_FIELD; 618 return reply((struct ib_mad_hdr *)smp); 619 } 620 621 ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); 622 ni->base_version = OPA_MGMT_BASE_VERSION; 623 ni->class_version = OPA_SM_CLASS_VERSION; 624 ni->node_type = 1; /* channel adapter */ 625 ni->num_ports = ibdev->phys_port_cnt; 626 /* This is already in network order */ 627 
ni->system_image_guid = ib_hfi1_sys_image_guid; 628 ni->node_guid = ibdev->node_guid; 629 ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd)); 630 ni->device_id = cpu_to_be16(dd->pcidev->device); 631 ni->revision = cpu_to_be32(dd->minrev); 632 ni->local_port_num = port; 633 ni->vendor_id[0] = dd->oui1; 634 ni->vendor_id[1] = dd->oui2; 635 ni->vendor_id[2] = dd->oui3; 636 637 if (resp_len) 638 *resp_len += sizeof(*ni); 639 640 return reply((struct ib_mad_hdr *)smp); 641 } 642 643 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, 644 u8 port) 645 { 646 struct ib_node_info *nip = (struct ib_node_info *)&smp->data; 647 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 648 unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */ 649 650 /* GUID 0 is illegal */ 651 if (smp->attr_mod || pidx >= dd->num_pports || 652 ibdev->node_guid == 0 || 653 get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { 654 smp->status |= IB_SMP_INVALID_FIELD; 655 return reply((struct ib_mad_hdr *)smp); 656 } 657 658 nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); 659 nip->base_version = OPA_MGMT_BASE_VERSION; 660 nip->class_version = OPA_SM_CLASS_VERSION; 661 nip->node_type = 1; /* channel adapter */ 662 nip->num_ports = ibdev->phys_port_cnt; 663 /* This is already in network order */ 664 nip->sys_guid = ib_hfi1_sys_image_guid; 665 nip->node_guid = ibdev->node_guid; 666 nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd)); 667 nip->device_id = cpu_to_be16(dd->pcidev->device); 668 nip->revision = cpu_to_be32(dd->minrev); 669 nip->local_port_num = port; 670 nip->vendor_id[0] = dd->oui1; 671 nip->vendor_id[1] = dd->oui2; 672 nip->vendor_id[2] = dd->oui3; 673 674 return reply((struct ib_mad_hdr *)smp); 675 } 676 677 static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w) 678 { 679 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w); 680 } 681 682 static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, 
u32 w) 683 { 684 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w); 685 } 686 687 static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s) 688 { 689 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s); 690 } 691 692 static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, 693 int mad_flags, __be64 mkey, __be32 dr_slid, 694 u8 return_path[], u8 hop_cnt) 695 { 696 int valid_mkey = 0; 697 int ret = 0; 698 699 /* Is the mkey in the process of expiring? */ 700 if (ibp->rvp.mkey_lease_timeout && 701 time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) { 702 /* Clear timeout and mkey protection field. */ 703 ibp->rvp.mkey_lease_timeout = 0; 704 ibp->rvp.mkeyprot = 0; 705 } 706 707 if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 || 708 ibp->rvp.mkey == mkey) 709 valid_mkey = 1; 710 711 /* Unset lease timeout on any valid Get/Set/TrapRepress */ 712 if (valid_mkey && ibp->rvp.mkey_lease_timeout && 713 (mad->method == IB_MGMT_METHOD_GET || 714 mad->method == IB_MGMT_METHOD_SET || 715 mad->method == IB_MGMT_METHOD_TRAP_REPRESS)) 716 ibp->rvp.mkey_lease_timeout = 0; 717 718 if (!valid_mkey) { 719 switch (mad->method) { 720 case IB_MGMT_METHOD_GET: 721 /* Bad mkey not a violation below level 2 */ 722 if (ibp->rvp.mkeyprot < 2) 723 break; 724 /* fall through */ 725 case IB_MGMT_METHOD_SET: 726 case IB_MGMT_METHOD_TRAP_REPRESS: 727 if (ibp->rvp.mkey_violations != 0xFFFF) 728 ++ibp->rvp.mkey_violations; 729 if (!ibp->rvp.mkey_lease_timeout && 730 ibp->rvp.mkey_lease_period) 731 ibp->rvp.mkey_lease_timeout = jiffies + 732 ibp->rvp.mkey_lease_period * HZ; 733 /* Generate a trap notice. */ 734 bad_mkey(ibp, mad, mkey, dr_slid, return_path, 735 hop_cnt); 736 ret = 1; 737 } 738 } 739 740 return ret; 741 } 742 743 /* 744 * The SMA caches reads from LCB registers in case the LCB is unavailable. 745 * (The LCB is unavailable in certain link states, for example.) 
746 */ 747 struct lcb_datum { 748 u32 off; 749 u64 val; 750 }; 751 752 static struct lcb_datum lcb_cache[] = { 753 { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 }, 754 }; 755 756 static int write_lcb_cache(u32 off, u64 val) 757 { 758 int i; 759 760 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 761 if (lcb_cache[i].off == off) { 762 lcb_cache[i].val = val; 763 return 0; 764 } 765 } 766 767 pr_warn("%s bad offset 0x%x\n", __func__, off); 768 return -1; 769 } 770 771 static int read_lcb_cache(u32 off, u64 *val) 772 { 773 int i; 774 775 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 776 if (lcb_cache[i].off == off) { 777 *val = lcb_cache[i].val; 778 return 0; 779 } 780 } 781 782 pr_warn("%s bad offset 0x%x\n", __func__, off); 783 return -1; 784 } 785 786 void read_ltp_rtt(struct hfi1_devdata *dd) 787 { 788 u64 reg; 789 790 if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, ®)) 791 dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__); 792 else 793 write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg); 794 } 795 796 static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, 797 struct ib_device *ibdev, u8 port, 798 u32 *resp_len, u32 max_len) 799 { 800 int i; 801 struct hfi1_devdata *dd; 802 struct hfi1_pportdata *ppd; 803 struct hfi1_ibport *ibp; 804 struct opa_port_info *pi = (struct opa_port_info *)data; 805 u8 mtu; 806 u8 credit_rate; 807 u8 is_beaconing_active; 808 u32 state; 809 u32 num_ports = OPA_AM_NPORT(am); 810 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); 811 u32 buffer_units; 812 u64 tmp = 0; 813 814 if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) { 815 smp->status |= IB_SMP_INVALID_FIELD; 816 return reply((struct ib_mad_hdr *)smp); 817 } 818 819 dd = dd_from_ibdev(ibdev); 820 /* IB numbers ports from 1, hw from 0 */ 821 ppd = dd->pport + (port - 1); 822 ibp = &ppd->ibport_data; 823 824 if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || 825 ppd->vls_supported > ARRAY_SIZE(dd->vld)) { 826 smp->status |= 
IB_SMP_INVALID_FIELD; 827 return reply((struct ib_mad_hdr *)smp); 828 } 829 830 pi->lid = cpu_to_be32(ppd->lid); 831 832 /* Only return the mkey if the protection field allows it. */ 833 if (!(smp->method == IB_MGMT_METHOD_GET && 834 ibp->rvp.mkey != smp->mkey && 835 ibp->rvp.mkeyprot == 1)) 836 pi->mkey = ibp->rvp.mkey; 837 838 pi->subnet_prefix = ibp->rvp.gid_prefix; 839 pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid); 840 pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); 841 pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period); 842 pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp); 843 pi->sa_qp = cpu_to_be32(ppd->sa_qp); 844 845 pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled); 846 pi->link_width.supported = cpu_to_be16(ppd->link_width_supported); 847 pi->link_width.active = cpu_to_be16(ppd->link_width_active); 848 849 pi->link_width_downgrade.supported = 850 cpu_to_be16(ppd->link_width_downgrade_supported); 851 pi->link_width_downgrade.enabled = 852 cpu_to_be16(ppd->link_width_downgrade_enabled); 853 pi->link_width_downgrade.tx_active = 854 cpu_to_be16(ppd->link_width_downgrade_tx_active); 855 pi->link_width_downgrade.rx_active = 856 cpu_to_be16(ppd->link_width_downgrade_rx_active); 857 858 pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported); 859 pi->link_speed.active = cpu_to_be16(ppd->link_speed_active); 860 pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled); 861 862 state = driver_lstate(ppd); 863 864 if (start_of_sm_config && (state == IB_PORT_INIT)) 865 ppd->is_sm_config_started = 1; 866 867 pi->port_phys_conf = (ppd->port_type & 0xf); 868 869 pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; 870 pi->port_states.ledenable_offlinereason |= 871 ppd->is_sm_config_started << 5; 872 /* 873 * This pairs with the memory barrier in hfi1_start_led_override to 874 * ensure that we read the correct state of LED beaconing represented 875 * by led_override_timer_active 876 */ 877 smp_rmb(); 878 
is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); 879 pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6; 880 pi->port_states.ledenable_offlinereason |= 881 ppd->offline_disabled_reason; 882 883 pi->port_states.portphysstate_portstate = 884 (driver_pstate(ppd) << 4) | state; 885 886 pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; 887 888 memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu)); 889 for (i = 0; i < ppd->vls_supported; i++) { 890 mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU); 891 if ((i % 2) == 0) 892 pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4); 893 else 894 pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu; 895 } 896 /* don't forget VL 15 */ 897 mtu = mtu_to_enum(dd->vld[15].mtu, 2048); 898 pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu; 899 pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL; 900 pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS); 901 pi->partenforce_filterraw |= 902 (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON); 903 if (ppd->part_enforce & HFI1_PART_ENFORCE_IN) 904 pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN; 905 if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT) 906 pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT; 907 pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations); 908 /* P_KeyViolations are counted by hardware. 
*/ 909 pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations); 910 pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations); 911 912 pi->vl.cap = ppd->vls_supported; 913 pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit); 914 pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP); 915 pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP); 916 917 pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout; 918 919 pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 | 920 OPA_PORT_LINK_MODE_OPA << 5 | 921 OPA_PORT_LINK_MODE_OPA); 922 923 pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode); 924 925 pi->port_mode = cpu_to_be16( 926 ppd->is_active_optimize_enabled ? 927 OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0); 928 929 pi->port_packet_format.supported = 930 cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B | 931 OPA_PORT_PACKET_FORMAT_16B); 932 pi->port_packet_format.enabled = 933 cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B | 934 OPA_PORT_PACKET_FORMAT_16B); 935 936 /* flit_control.interleave is (OPA V1, version .76): 937 * bits use 938 * ---- --- 939 * 2 res 940 * 2 DistanceSupported 941 * 2 DistanceEnabled 942 * 5 MaxNextLevelTxEnabled 943 * 5 MaxNestLevelRxSupported 944 * 945 * HFI supports only "distance mode 1" (see OPA V1, version .76, 946 * section 9.6.2), so set DistanceSupported, DistanceEnabled 947 * to 0x1. 948 */ 949 pi->flit_control.interleave = cpu_to_be16(0x1400); 950 951 pi->link_down_reason = ppd->local_link_down_reason.sma; 952 pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma; 953 pi->port_error_action = cpu_to_be32(ppd->port_error_action); 954 pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096); 955 956 /* 32.768 usec. 
response time (guessing) */ 957 pi->resptimevalue = 3; 958 959 pi->local_port_num = port; 960 961 /* buffer info for FM */ 962 pi->overall_buffer_space = cpu_to_be16(dd->link_credits); 963 964 pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid); 965 pi->neigh_port_num = ppd->neighbor_port_number; 966 pi->port_neigh_mode = 967 (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) | 968 (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) | 969 (ppd->neighbor_fm_security ? 970 OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0); 971 972 /* HFIs shall always return VL15 credits to their 973 * neighbor in a timely manner, without any credit return pacing. 974 */ 975 credit_rate = 0; 976 buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC; 977 buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK; 978 buffer_units |= (credit_rate << 6) & 979 OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE; 980 buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT; 981 pi->buffer_units = cpu_to_be32(buffer_units); 982 983 pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags); 984 pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7) 985 << 3 | (OPA_MCAST_NR & 0x7)); 986 987 /* HFI supports a replay buffer 128 LTPs in size */ 988 pi->replay_depth.buffer = 0x80; 989 /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 990 read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp); 991 992 /* 993 * this counter is 16 bits wide, but the replay_depth.wire 994 * variable is only 8 bits 995 */ 996 if (tmp > 0xff) 997 tmp = 0xff; 998 pi->replay_depth.wire = tmp; 999 1000 if (resp_len) 1001 *resp_len += sizeof(struct opa_port_info); 1002 1003 return reply((struct ib_mad_hdr *)smp); 1004 } 1005 1006 /** 1007 * get_pkeys - return the PKEY table 1008 * @dd: the hfi1_ib device 1009 * @port: the IB port number 1010 * @pkeys: the pkey table is placed here 1011 */ 1012 static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) 1013 { 1014 struct hfi1_pportdata *ppd = 
dd->pport + port - 1; 1015 1016 memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys)); 1017 1018 return 0; 1019 } 1020 1021 static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, 1022 struct ib_device *ibdev, u8 port, 1023 u32 *resp_len, u32 max_len) 1024 { 1025 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 1026 u32 n_blocks_req = OPA_AM_NBLK(am); 1027 u32 start_block = am & 0x7ff; 1028 __be16 *p; 1029 u16 *q; 1030 int i; 1031 u16 n_blocks_avail; 1032 unsigned npkeys = hfi1_get_npkeys(dd); 1033 size_t size; 1034 1035 if (n_blocks_req == 0) { 1036 pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n", 1037 port, start_block, n_blocks_req); 1038 smp->status |= IB_SMP_INVALID_FIELD; 1039 return reply((struct ib_mad_hdr *)smp); 1040 } 1041 1042 n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; 1043 1044 size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); 1045 1046 if (smp_length_check(size, max_len)) { 1047 smp->status |= IB_SMP_INVALID_FIELD; 1048 return reply((struct ib_mad_hdr *)smp); 1049 } 1050 1051 if (start_block + n_blocks_req > n_blocks_avail || 1052 n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) { 1053 pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; " 1054 "avail 0x%x; blk/smp 0x%lx\n", 1055 start_block, n_blocks_req, n_blocks_avail, 1056 OPA_NUM_PKEY_BLOCKS_PER_SMP); 1057 smp->status |= IB_SMP_INVALID_FIELD; 1058 return reply((struct ib_mad_hdr *)smp); 1059 } 1060 1061 p = (__be16 *)data; 1062 q = (u16 *)data; 1063 /* get the real pkeys if we are requesting the first block */ 1064 if (start_block == 0) { 1065 get_pkeys(dd, port, q); 1066 for (i = 0; i < npkeys; i++) 1067 p[i] = cpu_to_be16(q[i]); 1068 if (resp_len) 1069 *resp_len += size; 1070 } else { 1071 smp->status |= IB_SMP_INVALID_FIELD; 1072 } 1073 return reply((struct ib_mad_hdr *)smp); 1074 } 1075 1076 enum { 1077 HFI_TRANSITION_DISALLOWED, 1078 HFI_TRANSITION_IGNORED, 1079 HFI_TRANSITION_ALLOWED, 1080 HFI_TRANSITION_UNDEFINED, 1081 }; 1082 
/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
	__D = HFI_TRANSITION_DISALLOWED,
	__I = HFI_TRANSITION_IGNORED,
	__A = HFI_TRANSITION_ALLOWED,
	__U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/*      2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	}
};

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions
 */

#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/*      1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U},
	/* 2 */	{ __D, __I, __A, __D, __U},
	/* 3 */	{ __D, __D, __I, __A, __U},
	/* 4 */	{ __D, __D, __I, __I, __U},
	/* 5 */	{ __U, __U, __U, __U, __U},
	}
};

/*
 * Classify a requested logical port state transition against
 * logical_state_transitions.  A new state of IB_PORT_NOP is always
 * allowed; out-of-range states are HFI_TRANSITION_UNDEFINED.
 */
static int logical_transition_allowed(int old, int new)
{
	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
		pr_warn("invalid logical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORT_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into logical_state_transitions */
	old -= IB_PORT_DOWN;
	new -= IB_PORT_DOWN;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return logical_state_transitions.allowed[old][new];
}

/*
 * Classify a requested physical port state transition against
 * physical_state_transitions.  A new state of IB_PORTPHYSSTATE_NOP is
 * always allowed; out-of-range states are HFI_TRANSITION_UNDEFINED.
 */
static int physical_transition_allowed(int old, int new)
{
	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
		pr_warn("invalid physical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORTPHYSSTATE_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into physical_state_transitions */
	old -= IB_PORTPHYSSTATE_POLLING;
	new -= IB_PORTPHYSSTATE_POLLING;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return physical_state_transitions.allowed[old][new];
}

/*
 * Validate the combined logical+physical state change requested by the
 * FM against the port's current driver state.
 */
static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{
	u32 physical_old =
driver_pstate(ppd);
	u32 logical_old = driver_lstate(ppd);
	int ret, logical_allowed, physical_allowed;

	ret = logical_transition_allowed(logical_old, logical_new);
	logical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid logical state transition %s -> %s\n",
			opa_lstate_name(logical_old),
			opa_lstate_name(logical_new));
		return ret;
	}

	ret = physical_transition_allowed(physical_old, physical_new);
	physical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid physical state transition %s -> %s\n",
			opa_pstate_name(physical_old),
			opa_pstate_name(physical_new));
		return ret;
	}

	if (logical_allowed == HFI_TRANSITION_IGNORED &&
	    physical_allowed == HFI_TRANSITION_IGNORED)
		return HFI_TRANSITION_IGNORED;

	/*
	 * A change request of Physical Port State from
	 * 'Offline' to 'Polling' should be ignored.
	 */
	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
	    (physical_new == IB_PORTPHYSSTATE_POLLING))
		return HFI_TRANSITION_IGNORED;

	/*
	 * Either physical_allowed or logical_allowed is
	 * HFI_TRANSITION_ALLOWED.
	 */
	return HFI_TRANSITION_ALLOWED;
}

/*
 * Apply an FM-requested logical/physical port state change.  Returns 0
 * normally (errors are reported via smp->status), or a non-zero MAD
 * result when no reply should be sent (disabled port with hop_cnt set).
 */
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 link_state;
	int ret;

	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		/* error message emitted above */
		smp->status |= IB_SMP_INVALID_FIELD;
		return 0;
	}

	if (ret == HFI_TRANSITION_IGNORED)
		return 0;

	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
	    !(logical_state == IB_PORT_DOWN ||
	      logical_state == IB_PORT_NOP)){
		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
			logical_state, phys_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	/*
	 * Logical state changes are summarized in OPAv1g1 spec.,
	 * Table 9-12; physical state changes are summarized in
	 * OPAv1g1 spec., Table 6.4.
	 */
	switch (logical_state) {
	case IB_PORT_NOP:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (phys_state == IB_PORTPHYSSTATE_NOP) {
			link_state = HLS_DN_DOWNDEF;
		} else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
			link_state = HLS_DN_POLL;
			set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
					     0, OPA_LINKDOWN_REASON_FM_BOUNCE);
		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
			link_state = HLS_DN_DISABLE;
		} else {
			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
				phys_state);
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}

		if ((link_state == HLS_DN_POLL ||
		     link_state == HLS_DN_DOWNDEF)) {
			/*
			 * Going to poll.  No matter what the current state,
			 * always move offline first, then tune and start the
			 * link.  This correctly handles a FM link bounce and
			 * a link enable.  Going offline is a no-op if already
			 * offline.
			 */
			set_link_state(ppd, HLS_DN_OFFLINE);
			start_link(ppd);
		} else {
			set_link_state(ppd, link_state);
		}
		if (link_state == HLS_DN_DISABLE &&
		    (ppd->offline_disabled_reason >
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
		     ppd->offline_disabled_reason ==
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		break;
	case IB_PORT_ARMED:
		ret = set_link_state(ppd, HLS_UP_ARMED);
		if (!ret)
			send_idle_sma(dd, SMA_IDLE_ARM);
		break;
	case IB_PORT_ACTIVE:
		if (ppd->neighbor_normal) {
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret == 0)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
		} else {
			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
			smp->status |= IB_SMP_INVALID_FIELD;
		}
		break;
	default:
		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
			logical_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	return 0;
}

/**
 * __subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 */
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct opa_port_info *pi = (struct opa_port_info *)data;
	struct ib_event event;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u8 clientrereg;
	unsigned
long flags;
	u32 smlid;
	u32 lid;
	u8 ls_old, ls_new, ps_new;
	u8 vls;
	u8 msl;
	u8 crc_enabled;
	u16 lse, lwe, mtu;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	int ret, i, invalid = 0, call_set_mtu = 0;
	int call_link_downgrade_policy = 0;

	if (num_ports != 1 ||
	    smp_length_check(sizeof(*pi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* LIDs above 24 bits are rejected; report current info instead */
	lid = be32_to_cpu(pi->lid);
	if (lid & 0xFF000000) {
		pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}


	smlid = be32_to_cpu(pi->sm_lid);
	if (smlid & 0xFF000000) {
		pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}

	clientrereg = (pi->clientrereg_subnettimeout &
			OPA_PI_MASK_CLIENT_REREGISTER);

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ls_old = driver_lstate(ppd);

	ibp->rvp.mkey = pi->mkey;
	if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
		ibp->rvp.gid_prefix = pi->subnet_prefix;
		event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&event);
	}
	ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);

	/* Must be a valid unicast LID address. */
	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
	    (hfi1_is_16B_mcast(lid))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
			lid);
	} else if (ppd->lid != lid ||
		   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
		if (ppd->lid != lid)
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);

		if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
			/* Manufacture GID from LID to support extended
			 * addresses
			 */
			ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
				be64_to_cpu(OPA_MAKE_ID(lid));
			event.event = IB_EVENT_GID_CHANGE;
			ib_dispatch_event(&event);
		}
	}

	msl = pi->smsl & OPA_PI_MASK_SMSL;
	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
		ppd->linkinit_reason =
			(pi->partenforce_filterraw &
			 OPA_PI_MASK_LINKINIT_REASON);

	/* Must be a valid unicast LID address. */
	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
	    (hfi1_is_16B_mcast(smlid))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
			if (msl != ibp->rvp.sm_sl)
				rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	if (pi->link_down_reason == 0) {
		ppd->local_link_down_reason.sma = 0;
		ppd->local_link_down_reason.latest = 0;
	}

	if (pi->neigh_link_down_reason == 0) {
		ppd->neigh_link_down_reason.sma = 0;
		ppd->neigh_link_down_reason.latest = 0;
	}

	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
	ppd->sa_qp = be32_to_cpu(pi->sa_qp);

	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
	lwe = be16_to_cpu(pi->link_width.enabled);
	if (lwe) {
		if (lwe == OPA_LINK_WIDTH_RESET ||
		    lwe == OPA_LINK_WIDTH_RESET_OLD)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if ((lwe & ~ppd->link_width_supported) == 0)
			set_link_width_enabled(ppd, lwe);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}
	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
	/* LWD.E is always applied - 0 means "disabled" */
	if (lwe == OPA_LINK_WIDTH_RESET ||
	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
		set_link_width_downgrade_enabled(ppd,
						 ppd->
						 link_width_downgrade_supported
						 );
	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
		/* only set and apply if something changed */
		if (lwe != ppd->link_width_downgrade_enabled) {
			set_link_width_downgrade_enabled(ppd, lwe);
			call_link_downgrade_policy = 1;
		}
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	lse = be16_to_cpu(pi->link_speed.enabled);
	if (lse) {
		if (lse & be16_to_cpu(pi->link_speed.supported))
			set_link_speed_enabled(ppd, lse);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot =
		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
			      ibp->rvp.vl_high_limit);

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	/* per-VL MTUs are packed two to a byte: even VL high nibble */
	for (i = 0; i < ppd->vls_supported; i++) {
		if ((i % 2) == 0)
			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
					   4) & 0xF);
		else
			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
					  0xF);
		if (mtu == 0xffff) {
			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
				mtu,
				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
			smp->status |= IB_SMP_INVALID_FIELD;
			mtu = hfi1_max_mtu; /* use a valid MTU */
		}
		if (dd->vld[i].mtu != mtu) {
			dd_dev_info(dd,
				    "MTU change on vl %d from %d to %d\n",
				    i, dd->vld[i].mtu, mtu);
			dd->vld[i].mtu = mtu;
			call_set_mtu++;
		}
	}
	/* As per OPAV1 spec: VL15 must support and be configured
	 * for operation with a 2048 or larger MTU.
	 */
	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
	if (mtu < 2048 || mtu == 0xffff)
		mtu = 2048;
	if (dd->vld[15].mtu != mtu) {
		dd_dev_info(dd,
			    "MTU change on vl 15 from %d to %d\n",
			    dd->vld[15].mtu, mtu);
		dd->vld[15].mtu = mtu;
		call_set_mtu++;
	}
	if (call_set_mtu)
		set_mtu(ppd);

	/* Set operational VLs */
	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
	if (vls) {
		if (vls > ppd->vls_supported) {
			pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
				pi->operational_vls);
			smp->status |= IB_SMP_INVALID_FIELD;
		} else {
			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
					    vls) == -EINVAL)
				smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	/* a zero violation count from the SM clears our counter */
	if (pi->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pi->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pi->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ibp->rvp.subnet_timeout =
		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
	crc_enabled >>= 4;
	crc_enabled &= 0xf;

	if (crc_enabled != 0)
		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

	ppd->is_active_optimize_enabled =
			!!(be16_to_cpu(pi->port_mode)
					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

	ls_new = pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_STATE;
	ps_new = (pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0) {
				invalid = 1;
				smp->status |= IB_SMP_INVALID_FIELD;
			}
		}
	}

	/* Handle CLIENT_REREGISTER event b/c SM asked us for it */
	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */

	if (!invalid) {
		ret = set_port_states(ppd, smp, ls_new, ps_new);
		if (ret)
			return ret;
	}

	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
				      max_len);

	/* restore re-reg bit per o14-12.2.1 */
	pi->clientrereg_subnettimeout |= clientrereg;

	/*
	 * Apply the new link downgrade policy. This may result in a link
	 * bounce. Do this after everything else so things are settled.
	 * Possible problem: if setting the port state above fails, then
	 * the policy change is not applied.
	 */
	if (call_link_downgrade_policy)
		apply_link_downgrade_policy(ppd, 0);

	return ret;

get_only:
	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd;
	int i;
	int changed = 0;
	int update_includes_mgmt_partition = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	/*
	 * If the update does not include the management pkey, don't do it.
1686 */ 1687 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { 1688 if (pkeys[i] == LIM_MGMT_P_KEY) { 1689 update_includes_mgmt_partition = 1; 1690 break; 1691 } 1692 } 1693 1694 if (!update_includes_mgmt_partition) 1695 return 1; 1696 1697 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { 1698 u16 key = pkeys[i]; 1699 u16 okey = ppd->pkeys[i]; 1700 1701 if (key == okey) 1702 continue; 1703 /* 1704 * The SM gives us the complete PKey table. We have 1705 * to ensure that we put the PKeys in the matching 1706 * slots. 1707 */ 1708 ppd->pkeys[i] = key; 1709 changed = 1; 1710 } 1711 1712 if (changed) { 1713 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 1714 hfi1_event_pkey_change(dd, port); 1715 } 1716 1717 return 0; 1718 } 1719 1720 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, 1721 struct ib_device *ibdev, u8 port, 1722 u32 *resp_len, u32 max_len) 1723 { 1724 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 1725 u32 n_blocks_sent = OPA_AM_NBLK(am); 1726 u32 start_block = am & 0x7ff; 1727 u16 *p = (u16 *)data; 1728 __be16 *q = (__be16 *)data; 1729 int i; 1730 u16 n_blocks_avail; 1731 unsigned npkeys = hfi1_get_npkeys(dd); 1732 u32 size = 0; 1733 1734 if (n_blocks_sent == 0) { 1735 pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n", 1736 port, start_block, n_blocks_sent); 1737 smp->status |= IB_SMP_INVALID_FIELD; 1738 return reply((struct ib_mad_hdr *)smp); 1739 } 1740 1741 n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; 1742 1743 size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE); 1744 1745 if (smp_length_check(size, max_len)) { 1746 smp->status |= IB_SMP_INVALID_FIELD; 1747 return reply((struct ib_mad_hdr *)smp); 1748 } 1749 1750 if (start_block + n_blocks_sent > n_blocks_avail || 1751 n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) { 1752 pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n", 1753 start_block, n_blocks_sent, n_blocks_avail, 1754 
OPA_NUM_PKEY_BLOCKS_PER_SMP); 1755 smp->status |= IB_SMP_INVALID_FIELD; 1756 return reply((struct ib_mad_hdr *)smp); 1757 } 1758 1759 for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++) 1760 p[i] = be16_to_cpu(q[i]); 1761 1762 if (start_block == 0 && set_pkeys(dd, port, p) != 0) { 1763 smp->status |= IB_SMP_INVALID_FIELD; 1764 return reply((struct ib_mad_hdr *)smp); 1765 } 1766 1767 return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len, 1768 max_len); 1769 } 1770 1771 #define ILLEGAL_VL 12 1772 /* 1773 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except 1774 * for SC15, which must map to VL15). If we don't remap things this 1775 * way it is possible for VL15 counters to increment when we try to 1776 * send on a SC which is mapped to an invalid VL. 1777 * When getting the table convert ILLEGAL_VL back to VL15. 1778 */ 1779 static void filter_sc2vlt(void *data, bool set) 1780 { 1781 int i; 1782 u8 *pd = data; 1783 1784 for (i = 0; i < OPA_MAX_SCS; i++) { 1785 if (i == 15) 1786 continue; 1787 1788 if (set) { 1789 if ((pd[i] & 0x1f) == 0xf) 1790 pd[i] = ILLEGAL_VL; 1791 } else { 1792 if ((pd[i] & 0x1f) == ILLEGAL_VL) 1793 pd[i] = 0xf; 1794 } 1795 } 1796 } 1797 1798 static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data) 1799 { 1800 u64 *val = data; 1801 1802 filter_sc2vlt(data, true); 1803 1804 write_csr(dd, SEND_SC2VLT0, *val++); 1805 write_csr(dd, SEND_SC2VLT1, *val++); 1806 write_csr(dd, SEND_SC2VLT2, *val++); 1807 write_csr(dd, SEND_SC2VLT3, *val++); 1808 write_seqlock_irq(&dd->sc2vl_lock); 1809 memcpy(dd->sc2vl, data, sizeof(dd->sc2vl)); 1810 write_sequnlock_irq(&dd->sc2vl_lock); 1811 return 0; 1812 } 1813 1814 static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data) 1815 { 1816 u64 *val = (u64 *)data; 1817 1818 *val++ = read_csr(dd, SEND_SC2VLT0); 1819 *val++ = read_csr(dd, SEND_SC2VLT1); 1820 *val++ = read_csr(dd, SEND_SC2VLT2); 1821 *val++ = read_csr(dd, SEND_SC2VLT3); 1822 1823 filter_sc2vlt((u64 
		       *)data, false);
	return 0;
}

/* SubnGet(SLtoSCMapping): copy the 32-entry SL->SC table to the reply. */
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
	unsigned i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		*p++ = ibp->sl_to_sc[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * SubnSet(SLtoSCMapping): install a new SL->SC table; QPs on a changed
 * SL are moved to the error state, then the updated table is returned.
 */
static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc);
	int i;
	u8 sc;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
		sc = *p++;
		if (ibp->sl_to_sc[i] != sc) {
			ibp->sl_to_sc[i] = sc;

			/* Put all stale qps into error state */
			hfi1_error_port_qps(ibp, i);
		}
	}

	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

/* SubnGet(SCtoSLMapping): copy the 32-entry SC->SL table to the reply. */
static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
	unsigned i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		*p++ = ibp->sc_to_sl[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/* SubnSet(SCtoSLMapping): install a new SC->SL table and echo it back. */
static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	size_t size = ARRAY_SIZE(ibp->sc_to_sl);
	u8 *p = data;
	int i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		ibp->sc_to_sl[i] = *p++;

	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

/* SubnGet(SCtoVLtMapping): read the SC->VLt table from hardware. */
static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	get_sc2vlt_tables(dd, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * SubnSet(SCtoVLtMapping): write the SC->VLt table, only permitted when
 * the link is not Armed/Active (async updates are not supported).
 */
static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	int async_update = OPA_AM_ASYNC(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	struct hfi1_pportdata *ppd;
	int lstate;
	/*
	 * set_sc2vlt_tables writes the information contained in *data
	 * to four 64-bit registers SendSC2VLt[0-3].
We need to make 1958 * sure *max_len is not greater than the total size of the four 1959 * SendSC2VLt[0-3] registers. 1960 */ 1961 size_t size = 4 * sizeof(u64); 1962 1963 if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) { 1964 smp->status |= IB_SMP_INVALID_FIELD; 1965 return reply((struct ib_mad_hdr *)smp); 1966 } 1967 1968 /* IB numbers ports from 1, hw from 0 */ 1969 ppd = dd->pport + (port - 1); 1970 lstate = driver_lstate(ppd); 1971 /* 1972 * it's known that async_update is 0 by this point, but include 1973 * the explicit check for clarity 1974 */ 1975 if (!async_update && 1976 (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) { 1977 smp->status |= IB_SMP_INVALID_FIELD; 1978 return reply((struct ib_mad_hdr *)smp); 1979 } 1980 1981 set_sc2vlt_tables(dd, vp); 1982 1983 return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len, 1984 max_len); 1985 } 1986 1987 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, 1988 struct ib_device *ibdev, u8 port, 1989 u32 *resp_len, u32 max_len) 1990 { 1991 u32 n_blocks = OPA_AM_NPORT(am); 1992 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 1993 struct hfi1_pportdata *ppd; 1994 void *vp = (void *)data; 1995 int size = sizeof(struct sc2vlnt); 1996 1997 if (n_blocks != 1 || smp_length_check(size, max_len)) { 1998 smp->status |= IB_SMP_INVALID_FIELD; 1999 return reply((struct ib_mad_hdr *)smp); 2000 } 2001 2002 ppd = dd->pport + (port - 1); 2003 2004 fm_get_table(ppd, FM_TBL_SC2VLNT, vp); 2005 2006 if (resp_len) 2007 *resp_len += size; 2008 2009 return reply((struct ib_mad_hdr *)smp); 2010 } 2011 2012 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, 2013 struct ib_device *ibdev, u8 port, 2014 u32 *resp_len, u32 max_len) 2015 { 2016 u32 n_blocks = OPA_AM_NPORT(am); 2017 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 2018 struct hfi1_pportdata *ppd; 2019 void *vp = (void *)data; 2020 int lstate; 2021 int size = sizeof(struct sc2vlnt); 2022 2023 
if (n_blocks != 1 || smp_length_check(size, max_len)) { 2024 smp->status |= IB_SMP_INVALID_FIELD; 2025 return reply((struct ib_mad_hdr *)smp); 2026 } 2027 2028 /* IB numbers ports from 1, hw from 0 */ 2029 ppd = dd->pport + (port - 1); 2030 lstate = driver_lstate(ppd); 2031 if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) { 2032 smp->status |= IB_SMP_INVALID_FIELD; 2033 return reply((struct ib_mad_hdr *)smp); 2034 } 2035 2036 ppd = dd->pport + (port - 1); 2037 2038 fm_set_table(ppd, FM_TBL_SC2VLNT, vp); 2039 2040 return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, 2041 resp_len, max_len); 2042 } 2043 2044 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, 2045 struct ib_device *ibdev, u8 port, 2046 u32 *resp_len, u32 max_len) 2047 { 2048 u32 nports = OPA_AM_NPORT(am); 2049 u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); 2050 u32 lstate; 2051 struct hfi1_ibport *ibp; 2052 struct hfi1_pportdata *ppd; 2053 struct opa_port_state_info *psi = (struct opa_port_state_info *)data; 2054 2055 if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) { 2056 smp->status |= IB_SMP_INVALID_FIELD; 2057 return reply((struct ib_mad_hdr *)smp); 2058 } 2059 2060 ibp = to_iport(ibdev, port); 2061 ppd = ppd_from_ibp(ibp); 2062 2063 lstate = driver_lstate(ppd); 2064 2065 if (start_of_sm_config && (lstate == IB_PORT_INIT)) 2066 ppd->is_sm_config_started = 1; 2067 2068 psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; 2069 psi->port_states.ledenable_offlinereason |= 2070 ppd->is_sm_config_started << 5; 2071 psi->port_states.ledenable_offlinereason |= 2072 ppd->offline_disabled_reason; 2073 2074 psi->port_states.portphysstate_portstate = 2075 (driver_pstate(ppd) << 4) | (lstate & 0xf); 2076 psi->link_width_downgrade_tx_active = 2077 cpu_to_be16(ppd->link_width_downgrade_tx_active); 2078 psi->link_width_downgrade_rx_active = 2079 cpu_to_be16(ppd->link_width_downgrade_rx_active); 2080 if (resp_len) 2081 *resp_len += sizeof(struct 
				    opa_port_state_info);

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * SubnSet(PortStateInfo): apply the requested logical/physical state via
 * set_port_states(), then return the refreshed state info.
 */
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len, u32 max_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 ls_old;
	u8 ls_new, ps_new;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
	int ret, invalid = 0;

	if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	ls_old = driver_lstate(ppd);

	ls_new = port_states_to_logical_state(&psi->port_states);
	ps_new = port_states_to_phys_state(&psi->port_states);

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0) {
				invalid = 1;
				smp->status |= IB_SMP_INVALID_FIELD;
			}
		}
	}

	if (!invalid) {
		ret = set_port_states(ppd, smp, ls_new, ps_new);
		if (ret)
			return ret;
	}

	return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
				  max_len);
}

/*
 * SubnGet(CableInfo): read QSFP cable memory.  The address and length
 * come from the attribute modifier and must stay within one 128-byte
 * page of the 4096-byte CableInfo address space.
 */
static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 addr = OPA_AM_CI_ADDR(am);
	u32 len = OPA_AM_CI_LEN(am) + 1;
	int ret;

	if (dd->pport->port_type != PORT_TYPE_QSFP ||
	    smp_length_check(len, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a)	((a) & __CI_PAGE_MASK)

	/*
	 * check that addr is within spec, and
	 * addr and (addr + len - 1) are on the same "page"
	 */
	if (addr >= 4096 ||
	    (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ret = get_cable_info(dd, port, addr, len, data);

	if (ret == -ENODEV) {
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* The address range for the CableInfo SMA query is wider than the
	 * memory available on the QSFP cable. We want to return a valid
	 * response, albeit zeroed out, for address ranges beyond available
	 * memory but that are within the CableInfo query spec
	 */
	if (ret < 0 && ret != -ERANGE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	if (resp_len)
		*resp_len += len;

	return reply((struct ib_mad_hdr *)smp);
}

/* SubnGet(BufferControlTable): return the buffer control table via the FM API. */
static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port, u32 *resp_len,
			      u32 max_len)
{
	u32 num_ports = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	struct buffer_control *p = (struct buffer_control *)data;
	int size = sizeof(struct buffer_control);

	if (num_ports != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);
	fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
	trace_bct_get(dd, p);
	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/* SubnSet(BufferControlTable): validate, apply, and echo back the table. */
static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8
			      *data,
			      struct ib_device *ibdev, u8 port, u32 *resp_len,
			      u32 max_len)
{
	u32 num_ports = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	struct buffer_control *p = (struct buffer_control *)data;

	if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	trace_bct_set(dd, p);
	if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
				  max_len);
}

/*
 * __subn_get_opa_vl_arb - SMA Get(VLArbitration)
 *
 * The table section (low/high elements, preempt elements, preempt
 * matrix) is selected by bits 16-23 of the attribute modifier.
 */
static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
				 struct ib_device *ibdev, u8 port,
				 u32 *resp_len, u32 max_len)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	u32 num_ports = OPA_AM_NPORT(am);
	u8 section = (am & 0x00ff0000) >> 16;
	u8 *p = data;
	int size = 256;

	if (num_ports != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	switch (section) {
	case OPA_VLARB_LOW_ELEMENTS:
		fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
		break;
	case OPA_VLARB_HIGH_ELEMENTS:
		fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
		break;
	case OPA_VLARB_PREEMPT_ELEMENTS:
		fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
		break;
	case OPA_VLARB_PREEMPT_MATRIX:
		fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
		break;
	default:
		pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
			be32_to_cpu(smp->attr_mod));
		smp->status |= IB_SMP_INVALID_FIELD;
		size = 0;
		break;
	}

	if (size > 0 && resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * __subn_set_opa_vl_arb - SMA Set(VLArbitration)
 *
 * Only the low/high element tables are writable; the preempt tables
 * are fixed at their defaults. Replies via the Get path.
 */
static
int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
			  struct ib_device *ibdev, u8 port,
			  u32 *resp_len, u32 max_len)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	u32 num_ports = OPA_AM_NPORT(am);
	u8 section = (am & 0x00ff0000) >> 16;
	u8 *p = data;
	int size = 256;

	if (num_ports != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	switch (section) {
	case OPA_VLARB_LOW_ELEMENTS:
		(void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
		break;
	case OPA_VLARB_HIGH_ELEMENTS:
		(void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
		break;
	/*
	 * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
	 * can be changed from the default values
	 */
	case OPA_VLARB_PREEMPT_ELEMENTS:
		/* FALLTHROUGH */
	case OPA_VLARB_PREEMPT_MATRIX:
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		break;
	default:
		pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
			be32_to_cpu(smp->attr_mod));
		smp->status |= IB_SMP_INVALID_FIELD;
		break;
	}

	return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
				     max_len);
}

/* PMA MAD: standard MAD header plus an OPA-sized payload */
struct opa_pma_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[2024];
} __packed;

struct opa_port_status_req {
	__u8 port_num;
	__u8 reserved[3];
	__be32 vl_select_mask;
};

/* valid VL select bits: VL0-VL7 plus VL15 */
#define VL_MASK_ALL 0x000080ff

struct opa_port_status_rsp {
	__u8 port_num;
	__u8 reserved[3];
	__be32 vl_select_mask;

	/* Data counters */
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_pkts;
	__be64 port_rcv_pkts;
	__be64 port_multicast_xmit_pkts;
	__be64 port_multicast_rcv_pkts;
	__be64 port_xmit_wait;
	__be64 sw_port_congestion;
	__be64 port_rcv_fecn;
	__be64 port_rcv_becn;
	__be64
port_xmit_time_cong; 2348 __be64 port_xmit_wasted_bw; 2349 __be64 port_xmit_wait_data; 2350 __be64 port_rcv_bubble; 2351 __be64 port_mark_fecn; 2352 /* Error counters */ 2353 __be64 port_rcv_constraint_errors; 2354 __be64 port_rcv_switch_relay_errors; 2355 __be64 port_xmit_discards; 2356 __be64 port_xmit_constraint_errors; 2357 __be64 port_rcv_remote_physical_errors; 2358 __be64 local_link_integrity_errors; 2359 __be64 port_rcv_errors; 2360 __be64 excessive_buffer_overruns; 2361 __be64 fm_config_errors; 2362 __be32 link_error_recovery; 2363 __be32 link_downed; 2364 u8 uncorrectable_errors; 2365 2366 u8 link_quality_indicator; /* 5res, 3bit */ 2367 u8 res2[6]; 2368 struct _vls_pctrs { 2369 /* per-VL Data counters */ 2370 __be64 port_vl_xmit_data; 2371 __be64 port_vl_rcv_data; 2372 __be64 port_vl_xmit_pkts; 2373 __be64 port_vl_rcv_pkts; 2374 __be64 port_vl_xmit_wait; 2375 __be64 sw_port_vl_congestion; 2376 __be64 port_vl_rcv_fecn; 2377 __be64 port_vl_rcv_becn; 2378 __be64 port_xmit_time_cong; 2379 __be64 port_vl_xmit_wasted_bw; 2380 __be64 port_vl_xmit_wait_data; 2381 __be64 port_vl_rcv_bubble; 2382 __be64 port_vl_mark_fecn; 2383 __be64 port_vl_xmit_discards; 2384 } vls[0]; /* real array size defined by # bits set in vl_select_mask */ 2385 }; 2386 2387 enum counter_selects { 2388 CS_PORT_XMIT_DATA = (1 << 31), 2389 CS_PORT_RCV_DATA = (1 << 30), 2390 CS_PORT_XMIT_PKTS = (1 << 29), 2391 CS_PORT_RCV_PKTS = (1 << 28), 2392 CS_PORT_MCAST_XMIT_PKTS = (1 << 27), 2393 CS_PORT_MCAST_RCV_PKTS = (1 << 26), 2394 CS_PORT_XMIT_WAIT = (1 << 25), 2395 CS_SW_PORT_CONGESTION = (1 << 24), 2396 CS_PORT_RCV_FECN = (1 << 23), 2397 CS_PORT_RCV_BECN = (1 << 22), 2398 CS_PORT_XMIT_TIME_CONG = (1 << 21), 2399 CS_PORT_XMIT_WASTED_BW = (1 << 20), 2400 CS_PORT_XMIT_WAIT_DATA = (1 << 19), 2401 CS_PORT_RCV_BUBBLE = (1 << 18), 2402 CS_PORT_MARK_FECN = (1 << 17), 2403 CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16), 2404 CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15), 2405 CS_PORT_XMIT_DISCARDS = (1 << 14), 
2406 CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13), 2407 CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12), 2408 CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11), 2409 CS_PORT_RCV_ERRORS = (1 << 10), 2410 CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9), 2411 CS_FM_CONFIG_ERRORS = (1 << 8), 2412 CS_LINK_ERROR_RECOVERY = (1 << 7), 2413 CS_LINK_DOWNED = (1 << 6), 2414 CS_UNCORRECTABLE_ERRORS = (1 << 5), 2415 }; 2416 2417 struct opa_clear_port_status { 2418 __be64 port_select_mask[4]; 2419 __be32 counter_select_mask; 2420 }; 2421 2422 struct opa_aggregate { 2423 __be16 attr_id; 2424 __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */ 2425 __be32 attr_mod; 2426 u8 data[0]; 2427 }; 2428 2429 #define MSK_LLI 0x000000f0 2430 #define MSK_LLI_SFT 4 2431 #define MSK_LER 0x0000000f 2432 #define MSK_LER_SFT 0 2433 #define ADD_LLI 8 2434 #define ADD_LER 2 2435 2436 /* Request contains first three fields, response contains those plus the rest */ 2437 struct opa_port_data_counters_msg { 2438 __be64 port_select_mask[4]; 2439 __be32 vl_select_mask; 2440 __be32 resolution; 2441 2442 /* Response fields follow */ 2443 struct _port_dctrs { 2444 u8 port_number; 2445 u8 reserved2[3]; 2446 __be32 link_quality_indicator; /* 29res, 3bit */ 2447 2448 /* Data counters */ 2449 __be64 port_xmit_data; 2450 __be64 port_rcv_data; 2451 __be64 port_xmit_pkts; 2452 __be64 port_rcv_pkts; 2453 __be64 port_multicast_xmit_pkts; 2454 __be64 port_multicast_rcv_pkts; 2455 __be64 port_xmit_wait; 2456 __be64 sw_port_congestion; 2457 __be64 port_rcv_fecn; 2458 __be64 port_rcv_becn; 2459 __be64 port_xmit_time_cong; 2460 __be64 port_xmit_wasted_bw; 2461 __be64 port_xmit_wait_data; 2462 __be64 port_rcv_bubble; 2463 __be64 port_mark_fecn; 2464 2465 __be64 port_error_counter_summary; 2466 /* Sum of error counts/port */ 2467 2468 struct _vls_dctrs { 2469 /* per-VL Data counters */ 2470 __be64 port_vl_xmit_data; 2471 __be64 port_vl_rcv_data; 2472 __be64 port_vl_xmit_pkts; 2473 __be64 port_vl_rcv_pkts; 2474 __be64 port_vl_xmit_wait; 2475 
			__be64 sw_port_vl_congestion;
			__be64 port_vl_rcv_fecn;
			__be64 port_vl_rcv_becn;
			__be64 port_xmit_time_cong;
			__be64 port_vl_xmit_wasted_bw;
			__be64 port_vl_xmit_wait_data;
			__be64 port_vl_rcv_bubble;
			__be64 port_vl_mark_fecn;
		/*
		 * NOTE(review): vls[0] cannot become a C99 flexible array
		 * member here: a struct containing one may not be an array
		 * element, and _port_dctrs is used as port[1] below.
		 */
		} vls[0];
		/* array size defined by #bits set in vl_select_mask*/
	} port[1]; /* array size defined by #ports in attribute modifier */
};

struct opa_port_error_counters64_msg {
	/*
	 * Request contains first two fields, response contains the
	 * whole magilla
	 */
	__be64 port_select_mask[4];
	__be32 vl_select_mask;

	/* Response-only fields follow */
	__be32 reserved1;
	struct _port_ectrs {
		u8 port_number;
		u8 reserved2[7];
		__be64 port_rcv_constraint_errors;
		__be64 port_rcv_switch_relay_errors;
		__be64 port_xmit_discards;
		__be64 port_xmit_constraint_errors;
		__be64 port_rcv_remote_physical_errors;
		__be64 local_link_integrity_errors;
		__be64 port_rcv_errors;
		__be64 excessive_buffer_overruns;
		__be64 fm_config_errors;
		__be32 link_error_recovery;
		__be32 link_downed;
		u8 uncorrectable_errors;
		u8 reserved3[7];
		struct _vls_ectrs {
			__be64 port_vl_xmit_discards;
		} vls[0];
		/* array size defined by #bits set in vl_select_mask */
	} port[1]; /* array size defined by #ports in attribute modifier */
};

struct opa_port_error_info_msg {
	__be64 port_select_mask[4];
	__be32 error_info_select_mask;
	__be32 reserved1;
	struct _port_ei {
		u8 port_number;
		u8 reserved2[7];

		/* PortRcvErrorInfo */
		struct {
			u8 status_and_code;
			union {
				u8 raw[17];
				struct {
					/* EI1to12 format */
					u8 packet_flit1[8];
					u8 packet_flit2[8];
					u8 remaining_flit_bits12;
				} ei1to12;
				struct {
					u8 packet_bytes[8];
					u8 remaining_flit_bits;
				} ei13;
			} ei;
			u8 reserved3[6];
		} __packed port_rcv_ei;

		/* ExcessiveBufferOverrunInfo */
		struct {
			u8 status_and_sc;
			u8 reserved4[7];
		} __packed excessive_buffer_overrun_ei;

		/* PortXmitConstraintErrorInfo */
		struct {
			u8 status;
			u8 reserved5;
			__be16 pkey;
			__be32 slid;
		} __packed port_xmit_constraint_ei;

		/* PortRcvConstraintErrorInfo */
		struct {
			u8 status;
			u8 reserved6;
			__be16 pkey;
			__be32 slid;
		} __packed port_rcv_constraint_ei;

		/* PortRcvSwitchRelayErrorInfo */
		struct {
			u8 status_and_code;
			u8 reserved7[3];
			__u32 error_info;
		} __packed port_rcv_switch_relay_ei;

		/* UncorrectableErrorInfo */
		struct {
			u8 status_and_code;
			u8 reserved8;
		} __packed uncorrectable_ei;

		/* FMConfigErrorInfo */
		struct {
			u8 status_and_code;
			u8 error_info;
		} __packed fm_config_ei;
		__u32 reserved9;
	} port[1]; /* actual array size defined by #ports in attr modifier */
};

/* opa_port_error_info_msg error_info_select_mask bit definitions */
enum error_info_selects {
	ES_PORT_RCV_ERROR_INFO = (1 << 31),
	ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
	ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
	ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
	ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
	ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
	ES_FM_CONFIG_ERROR_INFO = (1 << 25)
};

/* pma_get_opa_classportinfo - PMA Get(ClassPortInfo) */
static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
				     struct ib_device *ibdev, u32 *resp_len)
{
	struct opa_class_port_info *p =
		(struct opa_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	/* ClassPortInfo takes no attribute modifier */
	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->base_version = OPA_MGMT_BASE_VERSION;
	p->class_version = OPA_SM_CLASS_VERSION;
	/*
	 * Expected response time is 4.096 usec.
	 * 2^18 == 1.073741824 sec.
	 */
	p->cap_mask2_resp_time = cpu_to_be32(18);

	if (resp_len)
		*resp_len += sizeof(*p);

	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * a0_portstatus - on non-Bx silicon, cap the reported PortXmitWait at
 * the sum of the per-VL wait counters (saturating at ~0 on wrap).
 */
static void a0_portstatus(struct hfi1_pportdata *ppd,
			  struct opa_port_status_rsp *rsp, u32 vl_select_mask)
{
	if (!is_bx(ppd->dd)) {
		unsigned long vl;
		u64 sum_vl_xmit_wait = 0;
		u32 vl_all_mask = VL_MASK_ALL;

		/*
		 * NOTE(review): casting &u32 to unsigned long * for
		 * for_each_set_bit() is width/endian sensitive — verify
		 * on big-endian 64-bit targets.
		 */
		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
				 8 * sizeof(vl_all_mask)) {
			u64 tmp = sum_vl_xmit_wait +
				  read_port_cntr(ppd, C_TX_WAIT_VL,
						 idx_from_vl(vl));
			if (tmp < sum_vl_xmit_wait) {
				/* we wrapped */
				sum_vl_xmit_wait = (u64)~0;
				break;
			}
			sum_vl_xmit_wait = tmp;
		}
		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
	}
}

/**
 * tx_link_width - convert link width bitmask to integer
 * value representing actual link width.
 * @link_width: width of active link
 * @return: return index of the bit set in link_width var
 *
 * The function convert and return the index of bit set
 * that indicate the current link width.
 */
u16 tx_link_width(u16 link_width)
{
	int n = LINK_WIDTH_DEFAULT;
	u16 tx_width = n;

	/* highest set bit wins; fall back to LINK_WIDTH_DEFAULT */
	while (link_width && n) {
		if (link_width & (1 << (n - 1))) {
			tx_width = n;
			break;
		}
		n--;
	}

	return tx_width;
}

/**
 * get_xmit_wait_counters - Convert HFI 's SendWaitCnt/SendWaitVlCnt
 * counter in unit of TXE cycle times to flit times.
 * @ppd: info of physical Hfi port
 * @link_width: width of active link
 * @link_speed: speed of active link
 * @vl: represent VL0-VL7, VL15 for PortVLXmitWait counters request
 *      and if vl value is C_VL_COUNT, it represent SendWaitCnt
 *      counter request
 * @return: return SendWaitCnt/SendWaitVlCnt counter value per vl.
 *
 * Convert SendWaitCnt/SendWaitVlCnt counter from TXE cycle times to
 * flit times. Call this function to samples these counters. This
 * function will calculate for previous state transition and update
 * current state at end of function using ppd->prev_link_width and
 * ppd->port_vl_xmit_wait_last to port_vl_xmit_wait_curr and link_width.
 */
u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
			   u16 link_width, u16 link_speed, int vl)
{
	u64 port_vl_xmit_wait_curr;
	u64 delta_vl_xmit_wait;
	u64 xmit_wait_val;

	if (vl > C_VL_COUNT)
		return 0;
	if (vl < C_VL_COUNT)
		port_vl_xmit_wait_curr =
			read_port_cntr(ppd, C_TX_WAIT_VL, vl);
	else
		/* vl == C_VL_COUNT selects the whole-port SendWaitCnt */
		port_vl_xmit_wait_curr =
			read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL);

	/* delta since the last sample, converted with the width/speed
	 * that were active during that interval
	 */
	xmit_wait_val =
		port_vl_xmit_wait_curr -
		ppd->port_vl_xmit_wait_last[vl];
	delta_vl_xmit_wait =
		convert_xmit_counter(xmit_wait_val,
				     ppd->prev_link_width,
				     link_speed);

	ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait;
	ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr;
	ppd->prev_link_width = link_width;

	return ppd->vl_xmit_flit_cnt[vl];
}

/*
 * pma_get_opa_portstatus - PMA Get(PortStatus): snapshot of all data
 * and error counters for one port plus the selected per-VL counters.
 */
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
				  struct ib_device *ibdev,
				  u8 port, u32 *resp_len)
{
	struct opa_port_status_req *req =
		(struct opa_port_status_req *)pmp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct opa_port_status_rsp *rsp;
	u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
	unsigned long vl;
	size_t
	       response_data_size;
	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
	u8 port_num = req->port_num;
	u8 num_vls = hweight32(vl_select_mask);
	struct _vls_pctrs *vlinfo;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	int vfi;
	u64 tmp, tmp2;
	u16 link_width;
	u16 link_speed;

	/* the fixed response plus one _vls_pctrs per selected VL */
	response_data_size = sizeof(struct opa_port_status_rsp) +
			     num_vls * sizeof(struct _vls_pctrs);
	if (response_data_size > sizeof(pmp->data)) {
		pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
		return reply((struct ib_mad_hdr *)pmp);
	}

	if (nports != 1 || (port_num && port_num != port) ||
	    num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	memset(pmp->data, 0, sizeof(pmp->data));

	rsp = (struct opa_port_status_rsp *)pmp->data;
	if (port_num)
		rsp->port_num = port_num;
	else
		rsp->port_num = port;

	rsp->port_rcv_constraint_errors =
		cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
					   CNTR_INVALID_VL));

	hfi1_read_link_quality(dd, &rsp->link_quality_indicator);

	rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
					  CNTR_INVALID_VL));
	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
					 CNTR_INVALID_VL));
	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
					  CNTR_INVALID_VL));
	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
					 CNTR_INVALID_VL));
	rsp->port_multicast_xmit_pkts =
		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
					  CNTR_INVALID_VL));
	rsp->port_multicast_rcv_pkts =
		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
					  CNTR_INVALID_VL));
	/*
	 * Convert PortXmitWait counter from TXE cycle
	 * times to flit times.
	 */
	link_width =
		tx_link_width(ppd->link_width_downgrade_tx_active);
	link_speed = get_link_speed(ppd->link_speed_active);
	rsp->port_xmit_wait =
		cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
						   link_speed, C_VL_COUNT));
	rsp->port_rcv_fecn =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
	rsp->port_rcv_becn =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
	rsp->port_xmit_discards =
		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
					   CNTR_INVALID_VL));
	rsp->port_xmit_constraint_errors =
		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
					   CNTR_INVALID_VL));
	rsp->port_rcv_remote_physical_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
					  CNTR_INVALID_VL));
	rsp->local_link_integrity_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
					  CNTR_INVALID_VL));
	/* link_error_recovery is 32 bits on the wire; saturate on wrap */
	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
				   CNTR_INVALID_VL);
	if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
		/* overflow/wrapped */
		rsp->link_error_recovery = cpu_to_be32(~0);
	} else {
		rsp->link_error_recovery = cpu_to_be32(tmp2);
	}
	rsp->port_rcv_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
	rsp->excessive_buffer_overruns =
		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
	rsp->fm_config_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
					  CNTR_INVALID_VL));
	rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
						      CNTR_INVALID_VL));

	/* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
	rsp->uncorrectable_errors = tmp < 0x100 ?
				    (tmp & 0xff) : 0xff;

	vlinfo = &rsp->vls[0];
	vfi = 0;
	/* The vl_select_mask has been checked above, and we know
	 * that it contains only entries which represent valid VLs.
	 * So in the for_each_set_bit() loop below, we don't need
	 * any additional checks for vl.
	 */
	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
			 8 * sizeof(vl_select_mask)) {
		memset(vlinfo, 0, sizeof(*vlinfo));

		tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
		rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);

		rsp->vls[vfi].port_vl_rcv_pkts =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_xmit_data =
			cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
						   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_xmit_pkts =
			cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
						   idx_from_vl(vl)));
		/*
		 * Convert PortVlXmitWait counter from TXE cycle
		 * times to flit times.
		 */
		rsp->vls[vfi].port_vl_xmit_wait =
			cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
							   link_speed,
							   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_fecn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_becn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_xmit_discards =
			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
						   idx_from_vl(vl)));
		vlinfo++;
		vfi++;
	}

	a0_portstatus(ppd, rsp, vl_select_mask);

	if (resp_len)
		*resp_len += response_data_size;

	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * get_error_counter_summary - sum all per-port error counters, with
 * the LLI and LER contributions right-shifted by their resolutions.
 */
static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
				     u8 res_lli, u8 res_ler)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 error_counter_summary = 0, tmp;

	error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
						CNTR_INVALID_VL);
	/* port_rcv_switch_relay_errors is 0 for HFIs */
	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
						CNTR_INVALID_VL);
	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
						CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
					       CNTR_INVALID_VL);
	/* local link integrity must be right-shifted by the lli resolution */
	error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
						CNTR_INVALID_VL) >> res_lli);
	/* link error recovery must be right-shifted by the ler resolution */
	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
	tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
	error_counter_summary += (tmp >> res_ler);
	error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
					       CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
	error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
					       CNTR_INVALID_VL);
	/* ppd->link_downed is a 32-bit value */
	error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
						CNTR_INVALID_VL);
	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
	/* this is an 8-bit quantity */
	error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;

	return error_counter_summary;
}

/*
 * a0_datacounters - non-Bx workaround: cap the reported PortXmitWait
 * at the summed per-VL wait counters (same logic as a0_portstatus).
 */
static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
			    u32 vl_select_mask)
{
	if (!is_bx(ppd->dd)) {
		unsigned long vl;
		u64 sum_vl_xmit_wait = 0;
		u32 vl_all_mask = VL_MASK_ALL;

		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
				 8 * sizeof(vl_all_mask)) {
			u64 tmp = sum_vl_xmit_wait +
				  read_port_cntr(ppd, C_TX_WAIT_VL,
						 idx_from_vl(vl));
			if (tmp < sum_vl_xmit_wait) {
				/* we wrapped */
				sum_vl_xmit_wait = (u64)~0;
				break;
			}
			sum_vl_xmit_wait = tmp;
		}
		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
	}
}

/*
 * pma_get_opa_port_dctrs - fill the device-level data counters shared
 * by the OPA DataPortCounters and legacy IB PortCountersExt replies.
 */
static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
				   struct _port_dctrs *rsp)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);

	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
					  CNTR_INVALID_VL));
	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
					 CNTR_INVALID_VL));
	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
					  CNTR_INVALID_VL));
	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
					 CNTR_INVALID_VL));
	rsp->port_multicast_xmit_pkts =
		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
					  CNTR_INVALID_VL));
	rsp->port_multicast_rcv_pkts =
		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
					  CNTR_INVALID_VL));
}

/*
 * pma_get_opa_datacounters - PMA Get(DataPortCounters) for one port,
 * including per-VL counters and the resolution-scaled error summary.
 */
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
				    struct ib_device *ibdev,
				    u8 port, u32 *resp_len)
{
	struct opa_port_data_counters_msg *req =
		(struct opa_port_data_counters_msg *)pmp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct _port_dctrs *rsp;
	struct _vls_dctrs *vlinfo;
	size_t response_data_size;
	u32 num_ports;
	u8 lq, num_vls;
	u8 res_lli, res_ler;
	u64 port_mask;
	u8 port_num;
	unsigned long vl;
	u32 vl_select_mask;
	int vfi;
	u16 link_width;
	u16 link_speed;

	num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
	vl_select_mask = be32_to_cpu(req->vl_select_mask);
	/* non-zero resolutions get a fixed additive offset */
	res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
	res_lli = res_lli ? res_lli + ADD_LLI : 0;
	res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
	res_ler = res_ler ? res_ler + ADD_LER : 0;

	if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	/* Sanity check */
	response_data_size = sizeof(struct opa_port_data_counters_msg) +
			     num_vls * sizeof(struct _vls_dctrs);

	if (response_data_size > sizeof(pmp->data)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
	port_mask = be64_to_cpu(req->port_select_mask[3]);
	port_num = find_first_bit((unsigned long *)&port_mask,
				  sizeof(port_mask) * 8);

	if (port_num != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	rsp = &req->port[0];
	memset(rsp, 0, sizeof(*rsp));

	rsp->port_number = port;
	/*
	 * Note that link_quality_indicator is a 32 bit quantity in
	 * 'datacounters' queries (as opposed to 'portinfo' queries,
	 * where it's a byte).
	 */
	hfi1_read_link_quality(dd, &lq);
	rsp->link_quality_indicator = cpu_to_be32((u32)lq);
	pma_get_opa_port_dctrs(ibdev, rsp);

	/*
	 * Convert PortXmitWait counter from TXE
	 * cycle times to flit times.
	 */
	link_width =
		tx_link_width(ppd->link_width_downgrade_tx_active);
	link_speed = get_link_speed(ppd->link_speed_active);
	rsp->port_xmit_wait =
		cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
						   link_speed, C_VL_COUNT));
	rsp->port_rcv_fecn =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
	rsp->port_rcv_becn =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
	rsp->port_error_counter_summary =
		cpu_to_be64(get_error_counter_summary(ibdev, port,
						      res_lli, res_ler));

	vlinfo = &rsp->vls[0];
	vfi = 0;
	/* The vl_select_mask has been checked above, and we know
	 * that it contains only entries which represent valid VLs.
	 * So in the for_each_set_bit() loop below, we don't need
	 * any additional checks for vl.
	 */
	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
			 8 * sizeof(req->vl_select_mask)) {
		memset(vlinfo, 0, sizeof(*vlinfo));

		rsp->vls[vfi].port_vl_xmit_data =
			cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
						   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_data =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
						  idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_xmit_pkts =
			cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
						   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_pkts =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
						  idx_from_vl(vl)));

		/*
		 * Convert PortVlXmitWait counter from TXE
		 * cycle times to flit times.
		 */
		rsp->vls[vfi].port_vl_xmit_wait =
			cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
							   link_speed,
							   idx_from_vl(vl)));

		rsp->vls[vfi].port_vl_rcv_fecn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
						  idx_from_vl(vl)));
		rsp->vls[vfi].port_vl_rcv_becn =
			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
						  idx_from_vl(vl)));

		/* rsp->port_vl_xmit_time_cong is 0 for HFIs */
		/* rsp->port_vl_xmit_wasted_bw ??? */
		/* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
		 * does this differ from rsp->vls[vfi].port_vl_xmit_wait
		 */
		/*rsp->vls[vfi].port_vl_mark_fecn =
		 *	cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
		 *		+ offset));
		 */
		vlinfo++;
		vfi++;
	}

	a0_datacounters(ppd, rsp, vl_select_mask);

	if (resp_len)
		*resp_len += response_data_size;

	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * pma_get_ib_portcounters_ext - legacy IB Get(PortCountersExt),
 * synthesized from the common OPA data counters.
 */
static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
				       struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
						pmp->data;
	struct _port_dctrs rsp;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	memset(&rsp, 0, sizeof(rsp));
	pma_get_opa_port_dctrs(ibdev, &rsp);

	p->port_xmit_data = rsp.port_xmit_data;
	p->port_rcv_data = rsp.port_rcv_data;
	p->port_xmit_packets = rsp.port_xmit_pkts;
	p->port_rcv_packets = rsp.port_rcv_pkts;
	/* unicast counters are not maintained; report zero */
	p->port_unicast_xmit_packets = 0;
	p->port_unicast_rcv_packets = 0;
	p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
	p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;

bail:
	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * pma_get_opa_port_ectrs - fill the error counters shared by the OPA
 * ErrorPortCounters and legacy IB replies.
 */
static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
				   struct _port_ectrs *rsp, u8 port)
{
	u64 tmp, tmp2;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/* link_error_recovery is 32 bits on the wire; saturate on wrap */
	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
				   CNTR_INVALID_VL);
	if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
		/* overflow/wrapped */
		rsp->link_error_recovery = cpu_to_be32(~0);
	} else {
		rsp->link_error_recovery = cpu_to_be32(tmp2);
	}

	rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
						      CNTR_INVALID_VL));
	rsp->port_rcv_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
	rsp->port_rcv_remote_physical_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
					  CNTR_INVALID_VL));
	rsp->port_rcv_switch_relay_errors = 0;
	rsp->port_xmit_discards =
		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
					   CNTR_INVALID_VL));
	rsp->port_xmit_constraint_errors =
		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
					   CNTR_INVALID_VL));
	rsp->port_rcv_constraint_errors =
		cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
					   CNTR_INVALID_VL));
	rsp->local_link_integrity_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
					  CNTR_INVALID_VL));
	rsp->excessive_buffer_overruns =
		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
}

/*
 * pma_get_opa_porterrors - PMA Get(ErrorPortCounters) for one port,
 * including per-VL xmit discard counters.
 */
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
				  struct ib_device *ibdev,
				  u8 port, u32 *resp_len)
{
	size_t response_data_size;
	struct _port_ectrs *rsp;
	u8 port_num;
	struct opa_port_error_counters64_msg *req;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 num_ports;
	u8 num_pslm;
	u8 num_vls;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct _vls_ectrs *vlinfo;
	unsigned long vl;
	u64 port_mask, tmp;
	u32 vl_select_mask;
	int vfi;

	req = (struct opa_port_error_counters64_msg *)pmp->data;

	num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;

	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));

	/* exactly one port, and the select mask must agree */
	if (num_ports != 1 || num_ports != num_pslm) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	response_data_size = sizeof(struct opa_port_error_counters64_msg) +
		num_vls *
sizeof(struct _vls_ectrs);

	if (response_data_size > sizeof(pmp->data)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}
	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
	port_mask = be64_to_cpu(req->port_select_mask[3]);
	port_num = find_first_bit((unsigned long *)&port_mask,
				  sizeof(port_mask) * 8);

	if (port_num != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	rsp = &req->port[0];

	ibp = to_iport(ibdev, port_num);
	ppd = ppd_from_ibp(ibp);

	memset(rsp, 0, sizeof(*rsp));
	rsp->port_number = port_num;

	/* Common error counters shared with the IB PortCounters path. */
	pma_get_opa_port_ectrs(ibdev, rsp, port_num);

	rsp->port_rcv_remote_physical_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
					  CNTR_INVALID_VL));
	rsp->fm_config_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
					  CNTR_INVALID_VL));
	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);

	/* uncorrectable_errors is an 8-bit field; saturate at 0xff. */
	rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
	rsp->port_rcv_errors =
		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
	vlinfo = &rsp->vls[0];
	vfi = 0;
	vl_select_mask = be32_to_cpu(req->vl_select_mask);
	/*
	 * NOTE(review): casting a u32's address to (unsigned long *) for
	 * for_each_set_bit assumes little-endian layout and reads past the
	 * u32 on 64-bit — TODO confirm against supported arches.
	 */
	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
			 8 * sizeof(req->vl_select_mask)) {
		memset(vlinfo, 0, sizeof(*vlinfo));
		rsp->vls[vfi].port_vl_xmit_discards =
			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
						   idx_from_vl(vl)));
		vlinfo += 1;
		vfi++;
	}

	if (resp_len)
		*resp_len += response_data_size;

	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * pma_get_ib_portcounters - handle an IB Get(PortCounters) request
 * @pmp: the incoming PMA MAD; the reply is built in place in pmp->data
 * @ibdev: the IB device the MAD arrived on
 * @port: the port the MAD arrived on
 *
 * Reads the 64-bit OPA error counters via pma_get_opa_port_ectrs() and
 * clamps each one into the narrower classic-IB PortCounters field
 * (8/16-bit), saturating at the field maximum.  Invalid attr_mod or
 * port_select marks the MAD IB_SMP_INVALID_FIELD.
 */
static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
				   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct _port_ectrs rsp;
	u64 temp_link_overrun_errors;
	u64 temp_64;
	u32 temp_32;

	memset(&rsp, 0, sizeof(rsp));
	pma_get_opa_port_ectrs(ibdev, &rsp, port);

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	p->symbol_error_counter = 0; /* N/A for OPA */

	/* Saturate 32-bit counters into 8-bit IB fields. */
	temp_32 = be32_to_cpu(rsp.link_error_recovery);
	if (temp_32 > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter = (u8)temp_32;

	temp_32 = be32_to_cpu(rsp.link_downed);
	if (temp_32 > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)temp_32;

	/* Saturate 64-bit counters into 16-bit IB fields. */
	temp_64 = be64_to_cpu(rsp.port_rcv_errors);
	if (temp_64 > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors = cpu_to_be16((u16)temp_64);

	temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
	if (temp_64 > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);

	temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
	p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);

	temp_64 = be64_to_cpu(rsp.port_xmit_discards);
	if (temp_64 > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards = cpu_to_be16((u16)temp_64);

	temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
	if (temp_64 > 0xFFUL)
		p->port_xmit_constraint_errors = 0xFF;
	else
		p->port_xmit_constraint_errors = (u8)temp_64;

	temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
	if (temp_64 > 0xFFUL)
		p->port_rcv_constraint_errors = 0xFFUL;
	else
		p->port_rcv_constraint_errors = (u8)temp_64;

	/* LocalLink: 7:4, BufferOverrun: 3:0 */
	temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
	if (temp_64 > 0xFUL)
		temp_64 = 0xFUL;

	temp_link_overrun_errors = temp_64 << 4;

	temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
	if (temp_64 > 0xFUL)
		temp_64 = 0xFUL;
	temp_link_overrun_errors |= temp_64;

	p->link_overrun_errors = (u8)temp_link_overrun_errors;

	p->vl15_dropped = 0; /* N/A for OPA */

bail:
	return reply((struct ib_mad_hdr *)pmp);
}

/*
 * pma_get_opa_errorinfo - handle an OPA Get(ErrorInfo) request
 * @pmp: the incoming PMA MAD; the reply is built in place in pmp->data
 * @ibdev: the IB device the MAD arrived on
 * @port: the port the MAD arrived on
 * @resp_len: on success, incremented by the size of the response payload
 *
 * Reports the saved per-error "first occurrence" details (receive,
 * excessive buffer overrun, xmit/rcv constraint, uncorrectable and FM
 * config error info) kept in the devdata, plus live state from the
 * RCV_ERR_INFO CSR.  Exactly one port must be addressed and it must be
 * the arrival port; otherwise the MAD status is IB_SMP_INVALID_FIELD.
 */
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
				 struct ib_device *ibdev,
				 u8 port, u32 *resp_len)
{
	size_t response_data_size;
	struct _port_ei *rsp;
	struct opa_port_error_info_msg *req;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u64 port_mask;
	u32 num_ports;
	u8 port_num;
	u8 num_pslm;
	u64 reg;

	req = (struct opa_port_error_info_msg *)pmp->data;
	rsp = &req->port[0];

	num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));

	memset(rsp, 0, sizeof(*rsp));

	/* Exactly one port must be addressed and selected. */
	if (num_ports != 1 || num_ports != num_pslm) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	/* Sanity check */
	response_data_size = sizeof(struct opa_port_error_info_msg);

	if (response_data_size > sizeof(pmp->data)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	/*
	 * The bit set in the mask needs to be consistent with the port
	 * the request came in on.
	 */
	port_mask = be64_to_cpu(req->port_select_mask[3]);
	port_num = find_first_bit((unsigned long *)&port_mask,
				  sizeof(port_mask) * 8);

	if (port_num != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	/* PortRcvErrorInfo */
	rsp->port_rcv_ei.status_and_code =
		dd->err_info_rcvport.status_and_code;
	memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
	       &dd->err_info_rcvport.packet_flit1, sizeof(u64));
	memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
	       &dd->err_info_rcvport.packet_flit2, sizeof(u64));

	/* ExcessiverBufferOverrunInfo */
	reg = read_csr(dd, RCV_ERR_INFO);
	if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
		/*
		 * if the RcvExcessBufferOverrun bit is set, save SC of
		 * first pkt that encountered an excess buffer overrun
		 */
		u8 tmp = (u8)reg;

		tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
		tmp <<= 2;
		rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
		/* set the status bit */
		rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
	}

	rsp->port_xmit_constraint_ei.status =
		dd->err_info_xmit_constraint.status;
	rsp->port_xmit_constraint_ei.pkey =
		cpu_to_be16(dd->err_info_xmit_constraint.pkey);
	rsp->port_xmit_constraint_ei.slid =
		cpu_to_be32(dd->err_info_xmit_constraint.slid);

	rsp->port_rcv_constraint_ei.status =
		dd->err_info_rcv_constraint.status;
3461 rsp->port_rcv_constraint_ei.pkey = 3462 cpu_to_be16(dd->err_info_rcv_constraint.pkey); 3463 rsp->port_rcv_constraint_ei.slid = 3464 cpu_to_be32(dd->err_info_rcv_constraint.slid); 3465 3466 /* UncorrectableErrorInfo */ 3467 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable; 3468 3469 /* FMConfigErrorInfo */ 3470 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig; 3471 3472 if (resp_len) 3473 *resp_len += response_data_size; 3474 3475 return reply((struct ib_mad_hdr *)pmp); 3476 } 3477 3478 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, 3479 struct ib_device *ibdev, 3480 u8 port, u32 *resp_len) 3481 { 3482 struct opa_clear_port_status *req = 3483 (struct opa_clear_port_status *)pmp->data; 3484 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 3485 struct hfi1_ibport *ibp = to_iport(ibdev, port); 3486 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 3487 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; 3488 u64 portn = be64_to_cpu(req->port_select_mask[3]); 3489 u32 counter_select = be32_to_cpu(req->counter_select_mask); 3490 u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ 3491 unsigned long vl; 3492 3493 if ((nports != 1) || (portn != 1 << port)) { 3494 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3495 return reply((struct ib_mad_hdr *)pmp); 3496 } 3497 /* 3498 * only counters returned by pma_get_opa_portstatus() are 3499 * handled, so when pma_get_opa_portstatus() gets a fix, 3500 * the corresponding change should be made here as well. 
3501 */ 3502 3503 if (counter_select & CS_PORT_XMIT_DATA) 3504 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0); 3505 3506 if (counter_select & CS_PORT_RCV_DATA) 3507 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0); 3508 3509 if (counter_select & CS_PORT_XMIT_PKTS) 3510 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0); 3511 3512 if (counter_select & CS_PORT_RCV_PKTS) 3513 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0); 3514 3515 if (counter_select & CS_PORT_MCAST_XMIT_PKTS) 3516 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0); 3517 3518 if (counter_select & CS_PORT_MCAST_RCV_PKTS) 3519 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0); 3520 3521 if (counter_select & CS_PORT_XMIT_WAIT) { 3522 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0); 3523 ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0; 3524 ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0; 3525 } 3526 /* ignore cs_sw_portCongestion for HFIs */ 3527 3528 if (counter_select & CS_PORT_RCV_FECN) 3529 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0); 3530 3531 if (counter_select & CS_PORT_RCV_BECN) 3532 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0); 3533 3534 /* ignore cs_port_xmit_time_cong for HFIs */ 3535 /* ignore cs_port_xmit_wasted_bw for now */ 3536 /* ignore cs_port_xmit_wait_data for now */ 3537 if (counter_select & CS_PORT_RCV_BUBBLE) 3538 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0); 3539 3540 /* Only applicable for switch */ 3541 /* if (counter_select & CS_PORT_MARK_FECN) 3542 * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0); 3543 */ 3544 3545 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS) 3546 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0); 3547 3548 /* ignore cs_port_rcv_switch_relay_errors for HFIs */ 3549 if (counter_select & CS_PORT_XMIT_DISCARDS) 3550 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0); 3551 3552 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS) 3553 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, 
CNTR_INVALID_VL, 0); 3554 3555 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS) 3556 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0); 3557 3558 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) 3559 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0); 3560 3561 if (counter_select & CS_LINK_ERROR_RECOVERY) { 3562 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0); 3563 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, 3564 CNTR_INVALID_VL, 0); 3565 } 3566 3567 if (counter_select & CS_PORT_RCV_ERRORS) 3568 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0); 3569 3570 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) { 3571 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0); 3572 dd->rcv_ovfl_cnt = 0; 3573 } 3574 3575 if (counter_select & CS_FM_CONFIG_ERRORS) 3576 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0); 3577 3578 if (counter_select & CS_LINK_DOWNED) 3579 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0); 3580 3581 if (counter_select & CS_UNCORRECTABLE_ERRORS) 3582 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); 3583 3584 for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), 3585 8 * sizeof(vl_select_mask)) { 3586 if (counter_select & CS_PORT_XMIT_DATA) 3587 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); 3588 3589 if (counter_select & CS_PORT_RCV_DATA) 3590 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0); 3591 3592 if (counter_select & CS_PORT_XMIT_PKTS) 3593 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0); 3594 3595 if (counter_select & CS_PORT_RCV_PKTS) 3596 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0); 3597 3598 if (counter_select & CS_PORT_XMIT_WAIT) { 3599 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0); 3600 ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0; 3601 ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0; 3602 } 3603 3604 /* sw_port_vl_congestion is 0 for HFIs */ 3605 if (counter_select & CS_PORT_RCV_FECN) 3606 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 
0); 3607 3608 if (counter_select & CS_PORT_RCV_BECN) 3609 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0); 3610 3611 /* port_vl_xmit_time_cong is 0 for HFIs */ 3612 /* port_vl_xmit_wasted_bw ??? */ 3613 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */ 3614 if (counter_select & CS_PORT_RCV_BUBBLE) 3615 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0); 3616 3617 /* if (counter_select & CS_PORT_MARK_FECN) 3618 * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0); 3619 */ 3620 if (counter_select & C_SW_XMIT_DSCD_VL) 3621 write_port_cntr(ppd, C_SW_XMIT_DSCD_VL, 3622 idx_from_vl(vl), 0); 3623 } 3624 3625 if (resp_len) 3626 *resp_len += sizeof(*req); 3627 3628 return reply((struct ib_mad_hdr *)pmp); 3629 } 3630 3631 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, 3632 struct ib_device *ibdev, 3633 u8 port, u32 *resp_len) 3634 { 3635 struct _port_ei *rsp; 3636 struct opa_port_error_info_msg *req; 3637 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 3638 u64 port_mask; 3639 u32 num_ports; 3640 u8 port_num; 3641 u8 num_pslm; 3642 u32 error_info_select; 3643 3644 req = (struct opa_port_error_info_msg *)pmp->data; 3645 rsp = &req->port[0]; 3646 3647 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); 3648 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); 3649 3650 memset(rsp, 0, sizeof(*rsp)); 3651 3652 if (num_ports != 1 || num_ports != num_pslm) { 3653 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3654 return reply((struct ib_mad_hdr *)pmp); 3655 } 3656 3657 /* 3658 * The bit set in the mask needs to be consistent with the port 3659 * the request came in on. 
	 */
	port_mask = be64_to_cpu(req->port_select_mask[3]);
	port_num = find_first_bit((unsigned long *)&port_mask,
				  sizeof(port_mask) * 8);

	if (port_num != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)pmp);
	}

	error_info_select = be32_to_cpu(req->error_info_select_mask);

	/* PortRcvErrorInfo */
	if (error_info_select & ES_PORT_RCV_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;

	/* ExcessiverBufferOverrunInfo */
	if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
		/*
		 * status bit is essentially kept in the h/w - bit 5 of
		 * RCV_ERR_INFO
		 */
		write_csr(dd, RCV_ERR_INFO,
			  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);

	if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
		dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;

	if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
		dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;

	/* UncorrectableErrorInfo */
	if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;

	/* FMConfigErrorInfo */
	if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;

	if (resp_len)
		*resp_len += sizeof(*req);

	return reply((struct ib_mad_hdr *)pmp);
}

/* Wire format of the OPA CongestionInfo attribute payload. */
struct opa_congestion_info_attr {
	__be16 congestion_info;
	u8 control_table_cap;	/* Multiple of 64 entry unit CCTs */
	u8 congestion_log_length;
} __packed;

/*
 * __subn_get_opa_cong_info - handle Get(CongestionInfo)
 *
 * Reports the congestion control table capacity (from the per-port data)
 * and the fixed congestion log length.  Fails with IB_SMP_INVALID_FIELD
 * if the request buffer is smaller than the attribute.
 */
static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	struct opa_congestion_info_attr *p =
		(struct opa_congestion_info_attr *)data;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	p->congestion_info = 0;
	p->control_table_cap = ppd->cc_max_table_entries;
	p->congestion_log_length = OPA_CONG_LOG_ELEMS;

	if (resp_len)
		*resp_len += sizeof(*p);

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * __subn_get_opa_cong_setting - handle Get(CongestionSetting)
 *
 * Copies the active congestion settings (port control, control map and
 * per-SL entries) out of the RCU-protected cc_state into the reply.
 * If no cc_state is published yet, replies without filling in data.
 */
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
				       u8 *data, struct ib_device *ibdev,
				       u8 port, u32 *resp_len, u32 max_len)
{
	int i;
	struct opa_congestion_setting_attr *p =
		(struct opa_congestion_setting_attr *)data;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct opa_congestion_setting_entry_shadow *entries;
	struct cc_state *cc_state;

	if (smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* cc_state is RCU-managed; hold the read lock while copying. */
	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return reply((struct ib_mad_hdr *)smp);
	}

	entries = cc_state->cong_setting.entries;
	p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
	p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
	for (i = 0; i < OPA_MAX_SLS; i++) {
		p->entries[i].ccti_increase = entries[i].ccti_increase;
		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
		p->entries[i].trigger_threshold =
			entries[i].trigger_threshold;
		p->entries[i].ccti_min = entries[i].ccti_min;
	}

	rcu_read_unlock();

	if (resp_len)
		*resp_len += sizeof(*p);

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * Apply
congestion control information stored in the ppd to the
 * active structure.
 */
static void apply_cc_state(struct hfi1_pportdata *ppd)
{
	struct cc_state *old_cc_state, *new_cc_state;

	/* Build the replacement outside the lock; bail silently on OOM. */
	new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
	if (!new_cc_state)
		return;

	/*
	 * Hold the lock for updating *and* to prevent ppd information
	 * from changing during the update.
	 */
	spin_lock(&ppd->cc_state_lock);

	old_cc_state = get_cc_state_protected(ppd);
	if (!old_cc_state) {
		/* never active, or shutting down */
		spin_unlock(&ppd->cc_state_lock);
		kfree(new_cc_state);
		return;
	}

	*new_cc_state = *old_cc_state;

	if (ppd->total_cct_entry)
		new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
	else
		new_cc_state->cct.ccti_limit = 0;

	memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
	       ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));

	new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
	new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
	memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
	       OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));

	/* Publish to RCU readers, then free the old copy after a grace period. */
	rcu_assign_pointer(ppd->cc_state, new_cc_state);

	spin_unlock(&ppd->cc_state_lock);

	kfree_rcu(old_cc_state, rcu);
}

/*
 * __subn_set_opa_cong_setting - handle Set(CongestionSetting)
 *
 * Stores the requested per-SL congestion settings into the ppd under
 * cc_state_lock, publishes them via apply_cc_state(), then returns the
 * now-active settings through the corresponding Get handler.
 */
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
				       struct ib_device *ibdev, u8 port,
				       u32 *resp_len, u32 max_len)
{
	struct opa_congestion_setting_attr *p =
		(struct opa_congestion_setting_attr *)data;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct opa_congestion_setting_entry_shadow *entries;
	int i;

	if (smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/*
	 * Save details from packet into the ppd.  Hold the cc_state_lock so
	 * our information is consistent with anyone trying to apply the state.
	 */
	spin_lock(&ppd->cc_state_lock);
	ppd->cc_sl_control_map = be32_to_cpu(p->control_map);

	entries = ppd->congestion_entries;
	for (i = 0; i < OPA_MAX_SLS; i++) {
		entries[i].ccti_increase = p->entries[i].ccti_increase;
		entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
		entries[i].trigger_threshold =
			p->entries[i].trigger_threshold;
		entries[i].ccti_min = p->entries[i].ccti_min;
	}
	spin_unlock(&ppd->cc_state_lock);

	/* now apply the information */
	apply_cc_state(ppd);

	return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
					   resp_len, max_len);
}

/*
 * __subn_get_opa_hfi1_cong_log - handle Get(HFICongestionLog)
 *
 * Drains the circular congestion event log into the reply under
 * cc_log_lock (irq-disabled), then resets the threshold event map and
 * counter.  am must be 0 and the buffer large enough, else
 * IB_SMP_INVALID_FIELD.
 */
static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
					u8 *data, struct ib_device *ibdev,
					u8 port, u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
	u64 ts;
	int i;

	if (am || smp_length_check(sizeof(*cong_log), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	spin_lock_irq(&ppd->cc_log_lock);

	cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
	cong_log->congestion_flags = 0;
	cong_log->threshold_event_counter =
		cpu_to_be16(ppd->threshold_event_counter);
	memcpy(cong_log->threshold_cong_event_map,
	       ppd->threshold_cong_event_map,
	       sizeof(cong_log->threshold_cong_event_map));
	/* keep timestamp in units of 1.024 usec */
	ts = ktime_get_ns() / 1024;
	cong_log->current_time_stamp = cpu_to_be32(ts);
	for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
		struct opa_hfi1_cong_log_event_internal *cce =
			&ppd->cc_events[ppd->cc_mad_idx++];
		/* cc_mad_idx walks the circular event buffer. */
		if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
			ppd->cc_mad_idx = 0;
		/*
		 * Entries which are older than twice the time
		 * required to wrap the counter are supposed to
		 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
		 */
		if ((ts - cce->timestamp) / 2 > U32_MAX)
			continue;
		/* QP numbers are 24-bit; copy only the low 3 bytes. */
		memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
		memcpy(cong_log->events[i].remote_qp_number_cn_entry,
		       &cce->rqpn, 3);
		cong_log->events[i].sl_svc_type_cn_entry =
			((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
		cong_log->events[i].remote_lid_cn_entry =
			cpu_to_be32(cce->rlid);
		cong_log->events[i].timestamp_cn_entry =
			cpu_to_be32(cce->timestamp);
	}

	/*
	 * Reset threshold_cong_event_map, and threshold_event_counter
	 * to 0 when log is read.
	 */
	memset(ppd->threshold_cong_event_map, 0x0,
	       sizeof(ppd->threshold_cong_event_map));
	ppd->threshold_event_counter = 0;

	spin_unlock_irq(&ppd->cc_log_lock);

	if (resp_len)
		*resp_len += sizeof(struct opa_hfi1_cong_log);

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * __subn_get_opa_cc_table - handle Get(CongestionControlTable)
 *
 * Returns n_blocks (from the attribute modifier) worth of CCT entries
 * starting at start_block, copied out of the RCU-protected cc_state.
 * Rejects a zero block count, an undersized buffer, or a range beyond
 * the table capacity with IB_SMP_INVALID_FIELD.
 */
static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct ib_cc_table_attr *cc_table_attr =
		(struct ib_cc_table_attr *)data;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 start_block = OPA_AM_START_BLK(am);
	u32 n_blocks = OPA_AM_NBLK(am);
	struct ib_cc_table_entry_shadow *entries;
	int i, j;
	u32 sentry, eentry;
	struct cc_state *cc_state;
	/* ccti_limit (one u16) plus the requested table entries. */
	u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);

	/* sanity check n_blocks, start_block */
	if (n_blocks == 0 || smp_length_check(size, max_len) ||
	    start_block + n_blocks > ppd->cc_max_table_entries) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return reply((struct ib_mad_hdr *)smp);
	}

	sentry = start_block * IB_CCT_ENTRIES;
	eentry = sentry + (IB_CCT_ENTRIES * n_blocks);

	cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);

	entries = cc_state->cct.entries;

	/* return n_blocks, though the last block may not be full */
	for (j = 0, i = sentry; i < eentry; j++, i++)
		cc_table_attr->ccti_entries[j].entry =
			cpu_to_be16(entries[i].entry);

	rcu_read_unlock();

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * __subn_set_opa_cc_table - handle Set(CongestionControlTable)
 *
 * Validates the block range and ccti_limit, stores the new CCT entries
 * into the ppd under cc_state_lock, publishes them via apply_cc_state(),
 * and replies with the active table via the Get handler.
 */
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 start_block = OPA_AM_START_BLK(am);
	u32 n_blocks = OPA_AM_NBLK(am);
	struct ib_cc_table_entry_shadow *entries;
	int i, j;
	u32 sentry, eentry;
	u16 ccti_limit;
	u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);

	/* sanity check n_blocks, start_block */
	if (n_blocks == 0 || smp_length_check(size, max_len) ||
	    start_block + n_blocks > ppd->cc_max_table_entries) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	sentry = start_block * IB_CCT_ENTRIES;
	/* The last block may be partial; ccti_limit gives its extent. */
	eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
		 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;

	/* sanity check ccti_limit */
	ccti_limit = be16_to_cpu(p->ccti_limit);
	if (ccti_limit + 1 > eentry) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return
reply((struct ib_mad_hdr *)smp);
	}

	/*
	 * Save details from packet into the ppd.  Hold the cc_state_lock so
	 * our information is consistent with anyone trying to apply the state.
	 */
	spin_lock(&ppd->cc_state_lock);
	ppd->total_cct_entry = ccti_limit + 1;
	entries = ppd->ccti_entries;
	for (j = 0, i = sentry; i < eentry; j++, i++)
		entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
	spin_unlock(&ppd->cc_state_lock);

	/* now apply the information */
	apply_cc_state(ppd);

	return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

/* Wire format of the LEDInfo attribute: only bit 31 (LED mask) is used. */
struct opa_led_info {
	__be32 rsvd_led_mask;
	__be32 rsvd;
};

#define OPA_LED_SHIFT	31
#define OPA_LED_MASK	BIT(OPA_LED_SHIFT)

/*
 * __subn_get_opa_led_info - handle Get(LEDInfo)
 *
 * Reports whether LED beaconing is currently active (bit 31 of
 * rsvd_led_mask).  Requires the attribute modifier to address exactly
 * one port and a sufficiently large buffer.
 */
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd = dd->pport;
	struct opa_led_info *p = (struct opa_led_info *)data;
	u32 nport = OPA_AM_NPORT(am);
	u32 is_beaconing_active;

	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
	p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);

	if (resp_len)
		*resp_len += sizeof(struct opa_led_info);

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * __subn_set_opa_led_info - handle Set(LEDInfo)
 *
 * Starts LED beaconing (2000ms period, 1500ms on-time) when bit 31 of
 * rsvd_led_mask is set, stops it otherwise, then replies with the
 * resulting state via the Get handler.
 */
static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct opa_led_info *p = (struct opa_led_info *)data;
	u32 nport = OPA_AM_NPORT(am);
	int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);

	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	if (on)
		hfi1_start_led_override(dd->pport, 2000, 1500);
	else
		shutdown_led_override(dd->pport);

	return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

/*
 * subn_get_opa_sma - dispatch an OPA subnet-management Get by attribute ID
 *
 * Routes the request to the matching __subn_get_opa_* handler.  SM_INFO
 * is special-cased: consumed silently when the SM is disabled on this
 * port, passed up when this port is the SM.  Unknown attributes get
 * IB_SMP_UNSUP_METH_ATTR.
 */
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
			    u8 *data, struct ib_device *ibdev, u8 port,
			    u32 *resp_len, u32 max_len)
{
	int ret;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);

	switch (attr_id) {
	case IB_SMP_ATTR_NODE_DESC:
		ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_NODE_INFO:
		ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_PORT_INFO:
		ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_PKEY_TABLE:
		ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SL_TO_SC_MAP:
		ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_SL_MAP:
		ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
		ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
		ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
						resp_len, max_len);
		break;
	case
OPA_ATTRIB_ID_PORT_STATE_INFO: 4142 ret = __subn_get_opa_psi(smp, am, data, ibdev, port, 4143 resp_len, max_len); 4144 break; 4145 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: 4146 ret = __subn_get_opa_bct(smp, am, data, ibdev, port, 4147 resp_len, max_len); 4148 break; 4149 case OPA_ATTRIB_ID_CABLE_INFO: 4150 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port, 4151 resp_len, max_len); 4152 break; 4153 case IB_SMP_ATTR_VL_ARB_TABLE: 4154 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port, 4155 resp_len, max_len); 4156 break; 4157 case OPA_ATTRIB_ID_CONGESTION_INFO: 4158 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port, 4159 resp_len, max_len); 4160 break; 4161 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: 4162 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev, 4163 port, resp_len, max_len); 4164 break; 4165 case OPA_ATTRIB_ID_HFI_CONGESTION_LOG: 4166 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev, 4167 port, resp_len, max_len); 4168 break; 4169 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: 4170 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port, 4171 resp_len, max_len); 4172 break; 4173 case IB_SMP_ATTR_LED_INFO: 4174 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port, 4175 resp_len, max_len); 4176 break; 4177 case IB_SMP_ATTR_SM_INFO: 4178 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) 4179 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 4180 if (ibp->rvp.port_cap_flags & IB_PORT_SM) 4181 return IB_MAD_RESULT_SUCCESS; 4182 /* FALLTHROUGH */ 4183 default: 4184 smp->status |= IB_SMP_UNSUP_METH_ATTR; 4185 ret = reply((struct ib_mad_hdr *)smp); 4186 break; 4187 } 4188 return ret; 4189 } 4190 4191 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, 4192 u8 *data, struct ib_device *ibdev, u8 port, 4193 u32 *resp_len, u32 max_len) 4194 { 4195 int ret; 4196 struct hfi1_ibport *ibp = to_iport(ibdev, port); 4197 4198 switch (attr_id) { 4199 case IB_SMP_ATTR_PORT_INFO: 4200 ret = 
__subn_set_opa_portinfo(smp, am, data, ibdev, port, 4201 resp_len, max_len); 4202 break; 4203 case IB_SMP_ATTR_PKEY_TABLE: 4204 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port, 4205 resp_len, max_len); 4206 break; 4207 case OPA_ATTRIB_ID_SL_TO_SC_MAP: 4208 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port, 4209 resp_len, max_len); 4210 break; 4211 case OPA_ATTRIB_ID_SC_TO_SL_MAP: 4212 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port, 4213 resp_len, max_len); 4214 break; 4215 case OPA_ATTRIB_ID_SC_TO_VLT_MAP: 4216 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port, 4217 resp_len, max_len); 4218 break; 4219 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: 4220 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port, 4221 resp_len, max_len); 4222 break; 4223 case OPA_ATTRIB_ID_PORT_STATE_INFO: 4224 ret = __subn_set_opa_psi(smp, am, data, ibdev, port, 4225 resp_len, max_len); 4226 break; 4227 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: 4228 ret = __subn_set_opa_bct(smp, am, data, ibdev, port, 4229 resp_len, max_len); 4230 break; 4231 case IB_SMP_ATTR_VL_ARB_TABLE: 4232 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port, 4233 resp_len, max_len); 4234 break; 4235 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: 4236 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev, 4237 port, resp_len, max_len); 4238 break; 4239 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: 4240 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port, 4241 resp_len, max_len); 4242 break; 4243 case IB_SMP_ATTR_LED_INFO: 4244 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port, 4245 resp_len, max_len); 4246 break; 4247 case IB_SMP_ATTR_SM_INFO: 4248 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) 4249 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 4250 if (ibp->rvp.port_cap_flags & IB_PORT_SM) 4251 return IB_MAD_RESULT_SUCCESS; 4252 /* FALLTHROUGH */ 4253 default: 4254 smp->status |= IB_SMP_UNSUP_METH_ATTR; 4255 ret = reply((struct ib_mad_hdr *)smp); 4256 break; 4257 } 
4258 return ret; 4259 } 4260 4261 static inline void set_aggr_error(struct opa_aggregate *ag) 4262 { 4263 ag->err_reqlength |= cpu_to_be16(0x8000); 4264 } 4265 4266 static int subn_get_opa_aggregate(struct opa_smp *smp, 4267 struct ib_device *ibdev, u8 port, 4268 u32 *resp_len) 4269 { 4270 int i; 4271 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff; 4272 u8 *next_smp = opa_get_smp_data(smp); 4273 4274 if (num_attr < 1 || num_attr > 117) { 4275 smp->status |= IB_SMP_INVALID_FIELD; 4276 return reply((struct ib_mad_hdr *)smp); 4277 } 4278 4279 for (i = 0; i < num_attr; i++) { 4280 struct opa_aggregate *agg; 4281 size_t agg_data_len; 4282 size_t agg_size; 4283 u32 am; 4284 4285 agg = (struct opa_aggregate *)next_smp; 4286 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8; 4287 agg_size = sizeof(*agg) + agg_data_len; 4288 am = be32_to_cpu(agg->attr_mod); 4289 4290 *resp_len += agg_size; 4291 4292 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) { 4293 smp->status |= IB_SMP_INVALID_FIELD; 4294 return reply((struct ib_mad_hdr *)smp); 4295 } 4296 4297 /* zero the payload for this segment */ 4298 memset(next_smp + sizeof(*agg), 0, agg_data_len); 4299 4300 (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data, 4301 ibdev, port, NULL, (u32)agg_data_len); 4302 4303 if (smp->status & IB_SMP_INVALID_FIELD) 4304 break; 4305 if (smp->status & ~IB_SMP_DIRECTION) { 4306 set_aggr_error(agg); 4307 return reply((struct ib_mad_hdr *)smp); 4308 } 4309 next_smp += agg_size; 4310 } 4311 4312 return reply((struct ib_mad_hdr *)smp); 4313 } 4314 4315 static int subn_set_opa_aggregate(struct opa_smp *smp, 4316 struct ib_device *ibdev, u8 port, 4317 u32 *resp_len) 4318 { 4319 int i; 4320 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff; 4321 u8 *next_smp = opa_get_smp_data(smp); 4322 4323 if (num_attr < 1 || num_attr > 117) { 4324 smp->status |= IB_SMP_INVALID_FIELD; 4325 return reply((struct ib_mad_hdr *)smp); 4326 } 4327 4328 for (i = 0; i < num_attr; i++) { 
4329 struct opa_aggregate *agg; 4330 size_t agg_data_len; 4331 size_t agg_size; 4332 u32 am; 4333 4334 agg = (struct opa_aggregate *)next_smp; 4335 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8; 4336 agg_size = sizeof(*agg) + agg_data_len; 4337 am = be32_to_cpu(agg->attr_mod); 4338 4339 *resp_len += agg_size; 4340 4341 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) { 4342 smp->status |= IB_SMP_INVALID_FIELD; 4343 return reply((struct ib_mad_hdr *)smp); 4344 } 4345 4346 (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data, 4347 ibdev, port, NULL, (u32)agg_data_len); 4348 if (smp->status & IB_SMP_INVALID_FIELD) 4349 break; 4350 if (smp->status & ~IB_SMP_DIRECTION) { 4351 set_aggr_error(agg); 4352 return reply((struct ib_mad_hdr *)smp); 4353 } 4354 next_smp += agg_size; 4355 } 4356 4357 return reply((struct ib_mad_hdr *)smp); 4358 } 4359 4360 /* 4361 * OPAv1 specifies that, on the transition to link up, these counters 4362 * are cleared: 4363 * PortRcvErrors [*] 4364 * LinkErrorRecovery 4365 * LocalLinkIntegrityErrors 4366 * ExcessiveBufferOverruns [*] 4367 * 4368 * [*] Error info associated with these counters is retained, but the 4369 * error info status is reset to 0. 
4370 */ 4371 void clear_linkup_counters(struct hfi1_devdata *dd) 4372 { 4373 /* PortRcvErrors */ 4374 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0); 4375 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK; 4376 /* LinkErrorRecovery */ 4377 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0); 4378 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0); 4379 /* LocalLinkIntegrityErrors */ 4380 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0); 4381 /* ExcessiveBufferOverruns */ 4382 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0); 4383 dd->rcv_ovfl_cnt = 0; 4384 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK; 4385 } 4386 4387 static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp) 4388 { 4389 unsigned int i; 4390 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 4391 4392 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) 4393 if (ppd->pkeys[i] == FULL_MGMT_P_KEY) 4394 return 1; 4395 4396 return 0; 4397 } 4398 4399 /* 4400 * is_local_mad() returns 1 if 'mad' is sent from, and destined to the 4401 * local node, 0 otherwise. 4402 */ 4403 static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad, 4404 const struct ib_wc *in_wc) 4405 { 4406 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 4407 const struct opa_smp *smp = (const struct opa_smp *)mad; 4408 4409 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 4410 return (smp->hop_cnt == 0 && 4411 smp->route.dr.dr_slid == OPA_LID_PERMISSIVE && 4412 smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE); 4413 } 4414 4415 return (in_wc->slid == ppd->lid); 4416 } 4417 4418 /* 4419 * opa_local_smp_check() should only be called on MADs for which 4420 * is_local_mad() returns true. It applies the SMP checks that are 4421 * specific to SMPs which are sent from, and destined to this node. 4422 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1 4423 * otherwise. 
4424 * 4425 * SMPs which arrive from other nodes are instead checked by 4426 * opa_smp_check(). 4427 */ 4428 static int opa_local_smp_check(struct hfi1_ibport *ibp, 4429 const struct ib_wc *in_wc) 4430 { 4431 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 4432 u16 pkey; 4433 4434 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys)) 4435 return 1; 4436 4437 pkey = ppd->pkeys[in_wc->pkey_index]; 4438 /* 4439 * We need to do the "node-local" checks specified in OPAv1, 4440 * rev 0.90, section 9.10.26, which are: 4441 * - pkey is 0x7fff, or 0xffff 4442 * - Source QPN == 0 || Destination QPN == 0 4443 * - the MAD header's management class is either 4444 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or 4445 * IB_MGMT_CLASS_SUBN_LID_ROUTED 4446 * - SLID != 0 4447 * 4448 * However, we know (and so don't need to check again) that, 4449 * for local SMPs, the MAD stack passes MADs with: 4450 * - Source QPN of 0 4451 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 4452 * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or 4453 * our own port's lid 4454 * 4455 */ 4456 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) 4457 return 0; 4458 ingress_pkey_table_fail(ppd, pkey, in_wc->slid); 4459 return 1; 4460 } 4461 4462 /** 4463 * hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets. 4464 * @ibp: IB port data 4465 * @in_mad: MAD packet with header and data 4466 * @in_wc: Work completion data such as source LID, port number, etc. 4467 * 4468 * These are all the possible logic rules for validating a pkey: 4469 * 4470 * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY, 4471 * and NOT self-originated packet: 4472 * Drop MAD packet as it should always be part of the 4473 * management partition unless it's a self-originated packet. 
4474 * 4475 * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table: 4476 * The packet is coming from a management node and the receiving node 4477 * is also a management node, so it is safe for the packet to go through. 4478 * 4479 * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table: 4480 * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table. 4481 * It could be an FM misconfiguration. 4482 * 4483 * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table: 4484 * It is safe for the packet to go through since a non-management node is 4485 * talking to another non-management node. 4486 * 4487 * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table: 4488 * Drop the packet because a non-management node is talking to a 4489 * management node, and it could be an attack. 4490 * 4491 * For the implementation, these rules can be simplied to only checking 4492 * for (a) and (e). There's no need to check for rule (b) as 4493 * the packet doesn't need to be dropped. Rule (c) is not possible in 4494 * the driver as LIM_MGMT_P_KEY is always in the pkey table. 
4495 * 4496 * Return: 4497 * 0 - pkey is okay, -EINVAL it's a bad pkey 4498 */ 4499 static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp, 4500 const struct opa_mad *in_mad, 4501 const struct ib_wc *in_wc) 4502 { 4503 u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index); 4504 4505 /* Rule (a) from above */ 4506 if (!is_local_mad(ibp, in_mad, in_wc) && 4507 pkey_value != LIM_MGMT_P_KEY && 4508 pkey_value != FULL_MGMT_P_KEY) 4509 return -EINVAL; 4510 4511 /* Rule (e) from above */ 4512 if (pkey_value == LIM_MGMT_P_KEY && 4513 is_full_mgmt_pkey_in_table(ibp)) 4514 return -EINVAL; 4515 4516 return 0; 4517 } 4518 4519 static int process_subn_opa(struct ib_device *ibdev, int mad_flags, 4520 u8 port, const struct opa_mad *in_mad, 4521 struct opa_mad *out_mad, 4522 u32 *resp_len) 4523 { 4524 struct opa_smp *smp = (struct opa_smp *)out_mad; 4525 struct hfi1_ibport *ibp = to_iport(ibdev, port); 4526 u8 *data; 4527 u32 am, data_size; 4528 __be16 attr_id; 4529 int ret; 4530 4531 *out_mad = *in_mad; 4532 data = opa_get_smp_data(smp); 4533 data_size = (u32)opa_get_smp_data_size(smp); 4534 4535 am = be32_to_cpu(smp->attr_mod); 4536 attr_id = smp->attr_id; 4537 if (smp->class_version != OPA_SM_CLASS_VERSION) { 4538 smp->status |= IB_SMP_UNSUP_VERSION; 4539 ret = reply((struct ib_mad_hdr *)smp); 4540 return ret; 4541 } 4542 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey, 4543 smp->route.dr.dr_slid, smp->route.dr.return_path, 4544 smp->hop_cnt); 4545 if (ret) { 4546 u32 port_num = be32_to_cpu(smp->attr_mod); 4547 4548 /* 4549 * If this is a get/set portinfo, we already check the 4550 * M_Key if the MAD is for another port and the M_Key 4551 * is OK on the receiving port. This check is needed 4552 * to increment the error counters when the M_Key 4553 * fails to match on *both* ports. 
4554 */ 4555 if (attr_id == IB_SMP_ATTR_PORT_INFO && 4556 (smp->method == IB_MGMT_METHOD_GET || 4557 smp->method == IB_MGMT_METHOD_SET) && 4558 port_num && port_num <= ibdev->phys_port_cnt && 4559 port != port_num) 4560 (void)check_mkey(to_iport(ibdev, port_num), 4561 (struct ib_mad_hdr *)smp, 0, 4562 smp->mkey, smp->route.dr.dr_slid, 4563 smp->route.dr.return_path, 4564 smp->hop_cnt); 4565 ret = IB_MAD_RESULT_FAILURE; 4566 return ret; 4567 } 4568 4569 *resp_len = opa_get_smp_header_size(smp); 4570 4571 switch (smp->method) { 4572 case IB_MGMT_METHOD_GET: 4573 switch (attr_id) { 4574 default: 4575 clear_opa_smp_data(smp); 4576 ret = subn_get_opa_sma(attr_id, smp, am, data, 4577 ibdev, port, resp_len, 4578 data_size); 4579 break; 4580 case OPA_ATTRIB_ID_AGGREGATE: 4581 ret = subn_get_opa_aggregate(smp, ibdev, port, 4582 resp_len); 4583 break; 4584 } 4585 break; 4586 case IB_MGMT_METHOD_SET: 4587 switch (attr_id) { 4588 default: 4589 ret = subn_set_opa_sma(attr_id, smp, am, data, 4590 ibdev, port, resp_len, 4591 data_size); 4592 break; 4593 case OPA_ATTRIB_ID_AGGREGATE: 4594 ret = subn_set_opa_aggregate(smp, ibdev, port, 4595 resp_len); 4596 break; 4597 } 4598 break; 4599 case IB_MGMT_METHOD_TRAP: 4600 case IB_MGMT_METHOD_REPORT: 4601 case IB_MGMT_METHOD_REPORT_RESP: 4602 case IB_MGMT_METHOD_GET_RESP: 4603 /* 4604 * The ib_mad module will call us to process responses 4605 * before checking for other consumers. 4606 * Just tell the caller to process it normally. 
4607 */ 4608 ret = IB_MAD_RESULT_SUCCESS; 4609 break; 4610 case IB_MGMT_METHOD_TRAP_REPRESS: 4611 subn_handle_opa_trap_repress(ibp, smp); 4612 /* Always successful */ 4613 ret = IB_MAD_RESULT_SUCCESS; 4614 break; 4615 default: 4616 smp->status |= IB_SMP_UNSUP_METHOD; 4617 ret = reply((struct ib_mad_hdr *)smp); 4618 break; 4619 } 4620 4621 return ret; 4622 } 4623 4624 static int process_subn(struct ib_device *ibdev, int mad_flags, 4625 u8 port, const struct ib_mad *in_mad, 4626 struct ib_mad *out_mad) 4627 { 4628 struct ib_smp *smp = (struct ib_smp *)out_mad; 4629 struct hfi1_ibport *ibp = to_iport(ibdev, port); 4630 int ret; 4631 4632 *out_mad = *in_mad; 4633 if (smp->class_version != 1) { 4634 smp->status |= IB_SMP_UNSUP_VERSION; 4635 ret = reply((struct ib_mad_hdr *)smp); 4636 return ret; 4637 } 4638 4639 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, 4640 smp->mkey, (__force __be32)smp->dr_slid, 4641 smp->return_path, smp->hop_cnt); 4642 if (ret) { 4643 u32 port_num = be32_to_cpu(smp->attr_mod); 4644 4645 /* 4646 * If this is a get/set portinfo, we already check the 4647 * M_Key if the MAD is for another port and the M_Key 4648 * is OK on the receiving port. This check is needed 4649 * to increment the error counters when the M_Key 4650 * fails to match on *both* ports. 
4651 */ 4652 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && 4653 (smp->method == IB_MGMT_METHOD_GET || 4654 smp->method == IB_MGMT_METHOD_SET) && 4655 port_num && port_num <= ibdev->phys_port_cnt && 4656 port != port_num) 4657 (void)check_mkey(to_iport(ibdev, port_num), 4658 (struct ib_mad_hdr *)smp, 0, 4659 smp->mkey, 4660 (__force __be32)smp->dr_slid, 4661 smp->return_path, smp->hop_cnt); 4662 ret = IB_MAD_RESULT_FAILURE; 4663 return ret; 4664 } 4665 4666 switch (smp->method) { 4667 case IB_MGMT_METHOD_GET: 4668 switch (smp->attr_id) { 4669 case IB_SMP_ATTR_NODE_INFO: 4670 ret = subn_get_nodeinfo(smp, ibdev, port); 4671 break; 4672 default: 4673 smp->status |= IB_SMP_UNSUP_METH_ATTR; 4674 ret = reply((struct ib_mad_hdr *)smp); 4675 break; 4676 } 4677 break; 4678 } 4679 4680 return ret; 4681 } 4682 4683 static int process_perf(struct ib_device *ibdev, u8 port, 4684 const struct ib_mad *in_mad, 4685 struct ib_mad *out_mad) 4686 { 4687 struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; 4688 struct ib_class_port_info *cpi = (struct ib_class_port_info *) 4689 &pmp->data; 4690 int ret = IB_MAD_RESULT_FAILURE; 4691 4692 *out_mad = *in_mad; 4693 if (pmp->mad_hdr.class_version != 1) { 4694 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; 4695 ret = reply((struct ib_mad_hdr *)pmp); 4696 return ret; 4697 } 4698 4699 switch (pmp->mad_hdr.method) { 4700 case IB_MGMT_METHOD_GET: 4701 switch (pmp->mad_hdr.attr_id) { 4702 case IB_PMA_PORT_COUNTERS: 4703 ret = pma_get_ib_portcounters(pmp, ibdev, port); 4704 break; 4705 case IB_PMA_PORT_COUNTERS_EXT: 4706 ret = pma_get_ib_portcounters_ext(pmp, ibdev, port); 4707 break; 4708 case IB_PMA_CLASS_PORT_INFO: 4709 cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; 4710 ret = reply((struct ib_mad_hdr *)pmp); 4711 break; 4712 default: 4713 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; 4714 ret = reply((struct ib_mad_hdr *)pmp); 4715 break; 4716 } 4717 break; 4718 4719 case IB_MGMT_METHOD_SET: 4720 if (pmp->mad_hdr.attr_id) { 4721 
pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; 4722 ret = reply((struct ib_mad_hdr *)pmp); 4723 } 4724 break; 4725 4726 case IB_MGMT_METHOD_TRAP: 4727 case IB_MGMT_METHOD_GET_RESP: 4728 /* 4729 * The ib_mad module will call us to process responses 4730 * before checking for other consumers. 4731 * Just tell the caller to process it normally. 4732 */ 4733 ret = IB_MAD_RESULT_SUCCESS; 4734 break; 4735 4736 default: 4737 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; 4738 ret = reply((struct ib_mad_hdr *)pmp); 4739 break; 4740 } 4741 4742 return ret; 4743 } 4744 4745 static int process_perf_opa(struct ib_device *ibdev, u8 port, 4746 const struct opa_mad *in_mad, 4747 struct opa_mad *out_mad, u32 *resp_len) 4748 { 4749 struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad; 4750 int ret; 4751 4752 *out_mad = *in_mad; 4753 4754 if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) { 4755 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; 4756 return reply((struct ib_mad_hdr *)pmp); 4757 } 4758 4759 *resp_len = sizeof(pmp->mad_hdr); 4760 4761 switch (pmp->mad_hdr.method) { 4762 case IB_MGMT_METHOD_GET: 4763 switch (pmp->mad_hdr.attr_id) { 4764 case IB_PMA_CLASS_PORT_INFO: 4765 ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len); 4766 break; 4767 case OPA_PM_ATTRIB_ID_PORT_STATUS: 4768 ret = pma_get_opa_portstatus(pmp, ibdev, port, 4769 resp_len); 4770 break; 4771 case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS: 4772 ret = pma_get_opa_datacounters(pmp, ibdev, port, 4773 resp_len); 4774 break; 4775 case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS: 4776 ret = pma_get_opa_porterrors(pmp, ibdev, port, 4777 resp_len); 4778 break; 4779 case OPA_PM_ATTRIB_ID_ERROR_INFO: 4780 ret = pma_get_opa_errorinfo(pmp, ibdev, port, 4781 resp_len); 4782 break; 4783 default: 4784 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; 4785 ret = reply((struct ib_mad_hdr *)pmp); 4786 break; 4787 } 4788 break; 4789 4790 case IB_MGMT_METHOD_SET: 4791 switch (pmp->mad_hdr.attr_id) { 4792 case 
OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS: 4793 ret = pma_set_opa_portstatus(pmp, ibdev, port, 4794 resp_len); 4795 break; 4796 case OPA_PM_ATTRIB_ID_ERROR_INFO: 4797 ret = pma_set_opa_errorinfo(pmp, ibdev, port, 4798 resp_len); 4799 break; 4800 default: 4801 pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; 4802 ret = reply((struct ib_mad_hdr *)pmp); 4803 break; 4804 } 4805 break; 4806 4807 case IB_MGMT_METHOD_TRAP: 4808 case IB_MGMT_METHOD_GET_RESP: 4809 /* 4810 * The ib_mad module will call us to process responses 4811 * before checking for other consumers. 4812 * Just tell the caller to process it normally. 4813 */ 4814 ret = IB_MAD_RESULT_SUCCESS; 4815 break; 4816 4817 default: 4818 pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; 4819 ret = reply((struct ib_mad_hdr *)pmp); 4820 break; 4821 } 4822 4823 return ret; 4824 } 4825 4826 static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags, 4827 u8 port, const struct ib_wc *in_wc, 4828 const struct ib_grh *in_grh, 4829 const struct opa_mad *in_mad, 4830 struct opa_mad *out_mad, size_t *out_mad_size, 4831 u16 *out_mad_pkey_index) 4832 { 4833 int ret; 4834 int pkey_idx; 4835 u32 resp_len = 0; 4836 struct hfi1_ibport *ibp = to_iport(ibdev, port); 4837 4838 pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); 4839 if (pkey_idx < 0) { 4840 pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n", 4841 hfi1_get_pkey(ibp, 1)); 4842 pkey_idx = 1; 4843 } 4844 *out_mad_pkey_index = (u16)pkey_idx; 4845 4846 switch (in_mad->mad_hdr.mgmt_class) { 4847 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 4848 case IB_MGMT_CLASS_SUBN_LID_ROUTED: 4849 if (is_local_mad(ibp, in_mad, in_wc)) { 4850 ret = opa_local_smp_check(ibp, in_wc); 4851 if (ret) 4852 return IB_MAD_RESULT_FAILURE; 4853 } 4854 ret = process_subn_opa(ibdev, mad_flags, port, in_mad, 4855 out_mad, &resp_len); 4856 goto bail; 4857 case IB_MGMT_CLASS_PERF_MGMT: 4858 ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc); 4859 if (ret) 4860 return IB_MAD_RESULT_FAILURE; 4861 4862 
ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len); 4863 goto bail; 4864 4865 default: 4866 ret = IB_MAD_RESULT_SUCCESS; 4867 } 4868 4869 bail: 4870 if (ret & IB_MAD_RESULT_REPLY) 4871 *out_mad_size = round_up(resp_len, 8); 4872 else if (ret & IB_MAD_RESULT_SUCCESS) 4873 *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh); 4874 4875 return ret; 4876 } 4877 4878 static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port, 4879 const struct ib_wc *in_wc, 4880 const struct ib_grh *in_grh, 4881 const struct ib_mad *in_mad, 4882 struct ib_mad *out_mad) 4883 { 4884 int ret; 4885 4886 switch (in_mad->mad_hdr.mgmt_class) { 4887 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 4888 case IB_MGMT_CLASS_SUBN_LID_ROUTED: 4889 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); 4890 break; 4891 case IB_MGMT_CLASS_PERF_MGMT: 4892 ret = process_perf(ibdev, port, in_mad, out_mad); 4893 break; 4894 default: 4895 ret = IB_MAD_RESULT_SUCCESS; 4896 break; 4897 } 4898 4899 return ret; 4900 } 4901 4902 /** 4903 * hfi1_process_mad - process an incoming MAD packet 4904 * @ibdev: the infiniband device this packet came in on 4905 * @mad_flags: MAD flags 4906 * @port: the port number this packet came in on 4907 * @in_wc: the work completion entry for this packet 4908 * @in_grh: the global route header for this packet 4909 * @in_mad: the incoming MAD 4910 * @out_mad: any outgoing MAD reply 4911 * 4912 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not 4913 * interested in processing. 4914 * 4915 * Note that the verbs framework has already done the MAD sanity checks, 4916 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 4917 * MADs. 4918 * 4919 * This is called by the ib_mad module. 
4920 */ 4921 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 4922 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 4923 const struct ib_mad_hdr *in_mad, size_t in_mad_size, 4924 struct ib_mad_hdr *out_mad, size_t *out_mad_size, 4925 u16 *out_mad_pkey_index) 4926 { 4927 switch (in_mad->base_version) { 4928 case OPA_MGMT_BASE_VERSION: 4929 if (unlikely(in_mad_size != sizeof(struct opa_mad))) { 4930 dev_err(ibdev->dev.parent, "invalid in_mad_size\n"); 4931 return IB_MAD_RESULT_FAILURE; 4932 } 4933 return hfi1_process_opa_mad(ibdev, mad_flags, port, 4934 in_wc, in_grh, 4935 (struct opa_mad *)in_mad, 4936 (struct opa_mad *)out_mad, 4937 out_mad_size, 4938 out_mad_pkey_index); 4939 case IB_MGMT_BASE_VERSION: 4940 return hfi1_process_ib_mad(ibdev, mad_flags, port, 4941 in_wc, in_grh, 4942 (const struct ib_mad *)in_mad, 4943 (struct ib_mad *)out_mad); 4944 default: 4945 break; 4946 } 4947 4948 return IB_MAD_RESULT_FAILURE; 4949 } 4950