#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"
#include "common.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40

#define DROP_PACKET_OFF		0
#define DROP_PACKET_ON		1

#define NEIGHBOR_TYPE_HFI	0
#define NEIGHBOR_TYPE_SWITCH	1

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			 HFI1_CAP_MISC_MASK)
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
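/*
 * Illustrative sketch only (not driver code): the accessors above are
 * typically used to test individual capability bits.  This assumes the
 * HFI1_CAP_SDMA and HFI1_CAP_DMA_RTAIL bits from the uapi header
 * (hfi1_user.h); kernel bits live in the low half of hfi1_cap_mask and
 * user bits are shifted up by HFI1_CAP_USER_SHIFT.
 */
#if 0
	/* kernel-side capability: is SDMA enabled for kernel contexts? */
	if (HFI1_CAP_IS_KSET(SDMA))
		pr_info("kernel SDMA capability set\n");
	/* user-side capability: will user contexts get a DMA'ed rcv tail? */
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		pr_info("user DMA_RTAIL capability set\n");
#endif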
/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT 0

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24

/*
 * Per-driver stats: either not device- or port-specific, or summed over
 * all of the devices and ports.  They are described by name via the
 * ipathfs filesystem, so layout and number of elements can change
 * without breaking compatibility.  If members are added or deleted,
 * hfi1_statnames[] in debugfs.c must be changed to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* tx-related packet errors */
	__u64 sps_rcverrs;	/* non-crc rcv packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull;
	__u64 sps_hdrfull;
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Below contains all data related to a single context (formerly called port).
 */

struct hfi1_opcode_stats_perctx;

struct ctxt_eager_bufs {
	ssize_t size;		/* total size of eager buffers */
	u32 count;		/* size of buffers array */
	u32 numbufs;		/* number of buffers allocated */
	u32 alloced;		/* number of rcvarray entries used */
	u32 rcvtid_size;	/* size of each eager rcv tid */
	u32 threshold;		/* head update threshold */
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
	struct {
		void *addr;
		dma_addr_t dma;
	} *rcvtids;
};

struct exp_tid_set {
	struct list_head list;
	u32 count;
};

struct hfi1_ctxtdata {
	/* shadow the ctxt's RcvCtrl register */
	u64 rcvctrl;
	/* rcvhdrq base, needs mmap before useful */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is updated */
	volatile __le64 *rcvhdrtail_kvaddr;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/* rcvhdrq size (for freeing) */
	size_t rcvhdrq_size;
	/* number of rcvhdrq entries */
	u16 rcvhdrq_cnt;
	/* size of each of the rcvhdrq entries */
	u16 rcvhdrqentsize;
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_dma;
	dma_addr_t rcvhdrqtailaddr_dma;
	struct ctxt_eager_bufs egrbufs;
	/* this receive context's assigned PIO ACK send context */
	struct send_context *sc;

	/* dynamic receive available interrupt timeout */
	u32 rcvavail_timeout;
	/* Reference count the base context usage */
	struct kref kref;

	/* Device context index */
	u16 ctxt;
	/*
	 * non-zero if ctxt can be shared, and defines the maximum number of
	 * sub-contexts for this device context.
	 */
	u16 subctxt_cnt;
	/* non-zero if ctxt is being shared. */
	u16 subctxt_id;
	u8 uuid[16];
	/* job key */
	u16 jkey;
	/* number of RcvArray groups for this context. */
	u32 rcv_array_groups;
	/* index of first eager TID entry. */
	u32 eager_base;
	/* number of expected TID entries */
	u32 expected_count;
	/* index of first expected TID entry. */
	u32 expected_base;

	struct exp_tid_set tid_group_list;
	struct exp_tid_set tid_used_list;
	struct exp_tid_set tid_full_list;

	/* lock protecting all Expected TID data */
	struct mutex exp_lock;
	/* per-context configuration flags */
	unsigned long flags;
	/* per-context event flags for fileops/intr communication */
	unsigned long event_flags;
	/* total number of polled urgent packets */
	u32 urgent;
	/* saved total number of polled urgent packets for poll edge trigger */
	u32 urgent_poll;
	/* same size as task_struct .comm[], command that opened context */
	char comm[TASK_COMM_LEN];
	/* so file ops can get at unit */
	struct hfi1_devdata *dd;
	/* so functions that need physical port can get it easily */
	struct hfi1_pportdata *ppd;
	/* associated msix interrupt */
	u32 msix_intr;
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* Bitmask of in use context(s) */
	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
	/* The version of the library which opened this ctxt */
	u32 userversion;
	/* Type of packets or conditions we want to poll for */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* ctxt rcvhdrq head offset */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
	/* interrupt handling */
	u64 imask;	/* clear interrupt mask */
	int ireg;	/* clear interrupt register */
	unsigned numa_id;	/* numa node of this context */
	/* verbs rx_stats per rcd */
	struct hfi1_opcode_stats_perctx *opstats;

	/* Is ASPM interrupt supported for this context */
	bool aspm_intr_supported;
	/* ASPM state (enabled/disabled) for this context */
	bool aspm_enabled;
	/* Timer for re-enabling ASPM if interrupt activity quietens down */
	struct timer_list aspm_timer;
	/* Lock to serialize between intr, timer intr and user threads */
	spinlock_t aspm_lock;
	/* Is ASPM processing enabled for this context (in intr context) */
	bool aspm_intr_enable;
	/* Last interrupt timestamp */
	ktime_t aspm_ts_last_intr;
	/* Last timestamp at which we scheduled a timer for this context */
	ktime_t aspm_ts_timer_sched;

	/*
	 * The interrupt handler for a particular receive context can vary
	 * throughout its lifetime.  This data member is not lock protected,
	 * so it must be updated atomically and the previous and new values
	 * must always be valid.  Worst case is we process an extra interrupt
	 * and up to 64 packets with the wrong interrupt handler.
	 */
	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);

	/* Indicates that this is a vnic context */
	bool is_vnic;

	/* vnic queue index this context is mapped to */
	u8 vnic_q_idx;
};
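/*
 * Illustrative sketch only (not driver code): per the do_interrupt note
 * above, the handler is swapped with a single atomic pointer update and
 * callers go through a single read of the pointer.  WRITE_ONCE/READ_ONCE
 * are used here just to make that explicit; both the old and the new
 * handler (e.g. handle_receive_interrupt_dma_rtail(), declared later in
 * this header) must remain valid across the switch.
 */
#if 0
	WRITE_ONCE(rcd->do_interrupt, handle_receive_interrupt_dma_rtail);
	/* a caller, possibly racing with the update above */
	READ_ONCE(rcd->do_interrupt)(rcd, 0);
#endif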
/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb
 * is if something is used one time to derive some value, store that something
 * in here. If it is used multiple times, then store the result of that
 * derivation in here.
 */
struct hfi1_packet {
	void *ebuf;
	void *hdr;
	void *payload;
	struct hfi1_ctxtdata *rcd;
	__le32 *rhf_addr;
	struct rvt_qp *qp;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	u64 rhf;
	u32 maxcnt;
	u32 rhqoff;
	u32 dlid;
	u32 slid;
	u16 tlen;
	s16 etail;
	u16 pkey;
	u8 hlen;
	u8 numpkt;
	u8 rsize;
	u8 updegr;
	u8 etype;
	u8 extra_byte;
	u8 pad;
	u8 sc;
	u8 sl;
	u8 opcode;
	bool migrated;
};

/* Packet types */
#define HFI1_PKT_TYPE_9B  0
#define HFI1_PKT_TYPE_16B 1

/*
 * OPA 16B Header
 */
#define OPA_16B_L4_MASK		0xFFull
#define OPA_16B_SC_MASK		0x1F00000ull
#define OPA_16B_SC_SHIFT	20
#define OPA_16B_LID_MASK	0xFFFFFull
#define OPA_16B_DLID_MASK	0xF000ull
#define OPA_16B_DLID_SHIFT	20
#define OPA_16B_DLID_HIGH_SHIFT	12
#define OPA_16B_SLID_MASK	0xF00ull
#define OPA_16B_SLID_SHIFT	20
#define OPA_16B_SLID_HIGH_SHIFT	8
#define OPA_16B_BECN_MASK	0x80000000ull
#define OPA_16B_BECN_SHIFT	31
#define OPA_16B_FECN_MASK	0x10000000ull
#define OPA_16B_FECN_SHIFT	28
#define OPA_16B_L2_MASK		0x60000000ull
#define OPA_16B_L2_SHIFT	29
#define OPA_16B_PKEY_MASK	0xFFFF0000ull
#define OPA_16B_PKEY_SHIFT	16
#define OPA_16B_LEN_MASK	0x7FF00000ull
#define OPA_16B_LEN_SHIFT	20
#define OPA_16B_RC_MASK		0xE000000ull
#define OPA_16B_RC_SHIFT	25
#define OPA_16B_AGE_MASK	0xFF0000ull
#define OPA_16B_AGE_SHIFT	16
#define OPA_16B_ENTROPY_MASK	0xFFFFull

/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L4_9B		0x00
#define OPA_16B_L2_TYPE		0x02
#define OPA_16B_L4_IB_LOCAL	0x09
#define OPA_16B_L4_IB_GLOBAL	0x0A
#define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR

static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
}
static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{
	return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
}

#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))

/*
 * BTH
 */
#define OPA_16B_BTH_PAD_MASK	7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		   OPA_16B_BTH_PAD_MASK);
}
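/*
 * Worked example (illustrative only, not driver code): a 24-bit 16B
 * DLID such as 0x812345 is carried with its low 20 bits in lrh[1] and
 * its high 4 bits in bits 15:12 of lrh[2]; hfi1_16B_get_dlid() above
 * reassembles them.
 */
#if 0
	struct hfi1_16b_header hdr = { 0 };

	hdr.lrh[1] = 0x12345;	/* DLID[19:0] */
	hdr.lrh[2] = 0x8000;	/* DLID[23:20] == 0x8, in bits 15:12 */
	/* (0x8 << OPA_16B_DLID_SHIFT) | 0x12345 == 0x812345 */
	WARN_ON(hfi1_16B_get_dlid(&hdr) != 0x812345);
#endif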
struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in.  Used as an argument to set_link_state().  Implemented
 * as bits for easy multi-state checking, but only one state can actually
 * be active at a time.
 */
#define __HLS_UP_INIT_BP	0
#define __HLS_UP_ARMED_BP	1
#define __HLS_UP_ACTIVE_BP	2
#define __HLS_DN_DOWNDEF_BP	3	/* link down default */
#define __HLS_DN_POLL_BP	4
#define __HLS_DN_DISABLE_BP	5
#define __HLS_DN_OFFLINE_BP	6
#define __HLS_VERIFY_CAP_BP	7
#define __HLS_GOING_UP_BP	8
#define __HLS_GOING_OFFLINE_BP	9
#define __HLS_LINK_COOLDOWN_BP	10

#define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)

#define HLS_DEFAULT HLS_DN_POLL
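/*
 * Illustrative sketch only: because each HLS_* value is a single bit,
 * several states can be tested with one mask.  handle_up_state() below
 * is a hypothetical consumer, and host_link_state is read under
 * hls_lock as the struct definition later in this header requires.
 */
#if 0
	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state & HLS_UP)
		handle_up_state(ppd);	/* INIT, ARMED or ACTIVE */
	mutex_unlock(&ppd->hls_lock);
#endif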
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB	0x01
#define HFI1_RCVCTRL_TAILUPD_DIS	0x02
#define HFI1_RCVCTRL_CTXT_ENB		0x04
#define HFI1_RCVCTRL_CTXT_DIS		0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB	0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS	0x20
#define HFI1_RCVCTRL_PKEY_ENB		0x40 /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS		0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB	0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS	0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB	0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS	0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB	0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS	0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB	0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS	0x20000

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN	0x1
#define HFI1_PART_ENFORCE_OUT	0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

/* Counter flags */
#define CNTR_NORMAL	0x0 /* Normal counters, just read register */
#define CNTR_SYNTH	0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED	0x2 /* Disable this counter */
#define CNTR_32BIT	0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL		0x8 /* Per VL counter */
#define CNTR_SDMA	0x10
#define CNTR_INVALID_VL	-1  /* Specifies invalid VL */
#define CNTR_MODE_W	0x0
#define CNTR_MODE_R	0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0

static inline void incr_cntr64(u64 *cntr)
{
	if (*cntr < (u64)-1LL)
		(*cntr)++;
}

static inline void incr_cntr32(u32 *cntr)
{
	if (*cntr < (u32)-1LL)
		(*cntr)++;
}

#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
	enum irq_type type;
	int irq;
	void *arg;
	cpumask_t mask;
	struct irq_affinity_notify notify;
};

/* per-SL CCA information */
struct cca_timer {
	struct hrtimer hrtimer;
	struct hfi1_pportdata *ppd; /* read-only */
	int sl; /* read-only */
	u16 ccti; /* read/write - current value of CCTI */
};

struct link_down_reason {
	/*
	 * SMA-facing value.  Should be set from .latest when
	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
	 */
	u8 sma;
	u8 latest;
};

enum {
	LO_PRIO_TABLE,
	HI_PRIO_TABLE,
	MAX_PRIO_TABLE
};

struct vl_arb_cache {
	/* protect vl arb cache */
	spinlock_t lock;
	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;
	struct kobject pport_cc_kobj;
	struct kobject sc2vl_kobj;
	struct kobject sl2sc_kobj;
	struct kobject vl2mtu_kobj;

	/* PHY support */
	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	u32 port_type;
	u32 tx_preset_eq;
	u32 tx_preset_noeq;
	u32 rx_preset;
	u8  local_atten;
	u8  remote_atten;
	u8  default_atten;
	u8  max_power_class;

	/* did we read platform config from scratch registers? */
	bool config_from_scratch;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];

	/* GUID for peer interface, in host order */
	u64 neighbor_guid;

	/* up or down physical link state */
	u32 linkup;

	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want.  One qword of status per port
	 */
	u64 *statusp;

	/* SendDMA related entries */

	struct workqueue_struct *hfi1_wq;
	struct workqueue_struct *link_wq;

	/* move out of interrupt context */
	struct work_struct link_vc_work;
	struct work_struct link_up_work;
	struct work_struct link_down_work;
	struct work_struct sma_message_work;
	struct work_struct freeze_work;
	struct work_struct link_downgrade_work;
	struct work_struct link_bounce_work;
	struct delayed_work start_link_work;
	/* host link state variables */
	struct mutex hls_lock;
	u32 host_link_state;

	/* these are the "32 bit" regs */

	u32 ibmtu; /* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 ibmaxlen;
	u32 current_egress_rate; /* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	u32 lid;
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;
	u8 actual_vls_operational;
	/* LID mask control */
	u8 lmc;
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 rx_pol_inv;

	u8 hw_pidx;	/* physical port index */
	u8 port;	/* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_type;
	u8 neighbor_normal;
	u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready;	/* driver ready for active link */
	u8 link_enabled;	/* link enabled? */
	u8 linkinit_reason;
	u8 local_tx_rate;	/* rate given to 8051 firmware */
	u8 qsfp_retry_count;

	/* placeholders for IB MAD packet settings */
	u8 overrun_threshold;
	u8 phy_error_threshold;
	unsigned int is_link_down_queued;

	/* Used to override LED behavior for things like maintenance beaconing */
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];
	u8 led_override_phase; /* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;

	u32 sm_trap_qp;
	u32 sa_qp;

	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	u16 total_cct_entry;

	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx; /* index for logging events */
	int cc_mad_idx; /* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

	/* port relative counter buffer */
	u64 *cntrs;
	/* port relative synthetic counter buffer */
	u64 *scntrs;
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	u64 link_downed;
	/* number of times link retrained successfully */
	u64 link_up;
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 mgmt_allowed;
	u8 part_enforce; /* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown. */
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
	bool cc_prescan;
	/*
	 * Sample sendWaitCnt & sendWaitVlCnt during link transition
	 * and counter request.
	 */
	u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
	u16 prev_link_width;
	u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};

typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
			      struct hfi1_pkt_state *ps,
			      struct rvt_swqe *wqe);

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0	/* keep going */
#define RHF_RCV_DONE	  1	/* stop, this packet processed */
#define RHF_RCV_REPROCESS 2	/* stop. retain this packet */

struct rcv_array_data {
	u8 group_size;
	u16 ngroups;
	u16 nctxt_extra;
};

struct per_vl_data {
	u16 mtu;
	struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16

struct err_info_rcvport {
	u8 status_and_code;
	u64 packet_flit1;
	u64 packet_flit2;
};

struct err_info_constraint {
	u8 status;
	u16 pkey;
	u32 slid;
};

struct hfi1_temp {
	unsigned int curr;	/* current temperature */
	unsigned int lo_lim;	/* low temperature limit */
	unsigned int hi_lim;	/* high temperature limit */
	unsigned int crit_lim;	/* critical temperature limit */
	u8 triggers;		/* temperature triggers */
};

struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd; /* current controlling device */
	struct i2c_adapter adapter;	/* bus details */
	struct i2c_algo_bit_data algo;	/* bus algorithm details */
	int num;			/* bus number, 0 or 1 */
};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2];	/* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES	256
#define NUM_MAP_REGS	32

/*
 * Number of VNIC contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_NUM_VNIC_CTXT	8

/* Number of VNIC RSM entries */
#define NUM_VNIC_MAP_ENTRIES	8

/* Virtual NIC information */
struct hfi1_vnic_data {
	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
	struct kmem_cache *txreq_cache;
	u8 num_vports;
	struct idr vesw_idr;
	u8 rmt_start;
	u8 num_ctxt;
	u32 msix_idx;
};

struct hfi1_vnic_vport_info;

/*
 * The device data struct now contains only "general per-device" info;
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */
struct sdma_engine;
struct sdma_vl_map;

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
	struct hfi1_ibdev verbs_dev;	/* must be first */
	struct list_head list;
	/* pointers to related structs for this device */
	/* pci access data structure */
	struct pci_dev *pcidev;
	struct cdev user_cdev;
	struct cdev diag_cdev;
	struct cdev ui_cdev;
	struct device *user_device;
	struct device *diag_device;
	struct device *ui_device;

	/* first mapping up to RcvArray */
	u8 __iomem *kregbase1;
	resource_size_t physaddr;

	/* second uncached mapping from RcvArray to pio send buffers */
	u8 __iomem *kregbase2;
	/* for detecting offset above kregbase2 address */
	u32 base2_start;

	/* Per VL data. Enough for all VLs but not all elements are set/used. */
	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
	/* send context data */
	struct send_context_info *send_contexts;
	/* map hardware send contexts to software index */
	u8 *hw_to_sw;
	/* spinlock for allocating and releasing send context resources */
	spinlock_t sc_lock;
	/* lock for pio_map */
	spinlock_t pio_map_lock;
	/* Send Context initialization lock. */
	spinlock_t sc_init_lock;
	/* lock for sdma_map */
	spinlock_t sde_map_lock;
	/* array of kernel send contexts */
	struct send_context **kernel_send_context;
	/* array of vl maps */
	struct pio_vl_map __rcu *pio_map;
	/* default flags to last descriptor */
	u64 default_desc1;

	/* fields common to all SDMA engines */

	volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
	dma_addr_t sdma_heads_phys;
	void *sdma_pad_dma; /* DMA'ed by chip */
	dma_addr_t sdma_pad_phys;
	/* for deallocation */
	size_t sdma_heads_size;
	/* number from the chip */
	u32 chip_sdma_engines;
	/* num used */
	u32 num_sdma;
	/* array of engines sized by num_sdma */
	struct sdma_engine *per_sdma;
	/* array of vl maps */
	struct sdma_vl_map __rcu *sdma_map;
	/* SPC freeze waitqueue and variable */
	wait_queue_head_t sdma_unfreeze_wq;
	atomic_t sdma_unfreeze_count;

	u32 lcb_access_count;	/* count of LCB users */

	/* common data between shared ASIC HFIs in this OS */
	struct hfi1_asic_data *asic_data;

	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/*
	 * write-combining mem-mapped pointer to base of RcvArray
	 * memory.
	 */
	void __iomem *rcvarray_wc;
	/*
	 * credit return base - a per-NUMA range of DMA address that
	 * the chip will use to update the per-context free counter
	 */
	struct credit_return_base *cr_base;

	/* send context numbers and sizes for each type */
	struct sc_config_sizes sc_sizes[SC_MAX];

	char *boardname; /* human readable board info */

	/* reset value */
	u64 z_int_counter;
	u64 z_rcv_limit;
	u64 z_send_schedule;

	u64 __percpu *send_schedule;
	/* number of reserved contexts for VNIC usage */
	u16 num_vnic_contexts;
	/* number of receive contexts in use by the driver */
	u32 num_rcv_contexts;
	/* number of pio send contexts in use by the driver */
	u32 num_send_contexts;
	/*
	 * number of ctxts available for PSM open
	 */
	u32 freectxts;
	/* total number of available user/PSM contexts */
	u32 num_user_contexts;
	/* base receive interrupt timeout, in CSR units */
	u32 rcv_intr_timeout_csr;

	u32 freezelen; /* max length of freezemsg */
	u64 __iomem *egrtidbase;
	spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
	spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
	spinlock_t uctxt_lock; /* protect rcd changes */
	struct mutex dc8051_lock; /* exclusive access to 8051 */
	struct workqueue_struct *update_cntr_wq;
	struct work_struct update_cntr_work;
	/* exclusive access to 8051 memory */
	spinlock_t dc8051_memlock;
	int dc8051_timed_out;	/* remember if the 8051 timed out */
	/*
	 * A page that will hold event notification bitmaps for all
	 * contexts. This page will be mapped into all processes.
	 */
	unsigned long *events;
	/*
	 * per unit status, see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	struct hfi1_status *status;

	/* revision register shadow */
	u64 revision;
	/* Base GUID for device (network order) */
	u64 base_guid;

	/* these are the "32 bit" regs */

	/* value we put in kr_rcvhdrsize */
	u32 rcvhdrsize;
	/* number of receive contexts the chip supports */
	u32 chip_rcv_contexts;
	/* number of receive array entries */
	u32 chip_rcv_array_count;
	/* number of PIO send contexts the chip supports */
	u32 chip_send_contexts;
	/* number of bytes in the PIO memory buffer */
	u32 chip_pio_mem_size;
	/* number of bytes in the SDMA memory buffer */
	u32 chip_sdma_mem_size;

	/* size of each rcvegrbuffer */
	u32 rcvegrbufsize;
	/* log2 of above */
	u16 rcvegrbufsize_shift;
	/* both sides of the PCIe link are gen3 capable */
	u8 link_gen3_capable;
	u8 dc_shutdown;
	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
	u32 lbus_width;
	/* localbus speed in MHz */
	u32 lbus_speed;
	int unit; /* unit # of this chip */
	int node; /* home node of this chip */

	/* save these PCI fields to restore after a reset */
	u32 pcibar0;
	u32 pcibar1;
	u32 pci_rom;
	u16 pci_command;
	u16 pcie_devctl;
	u16 pcie_lnkctl;
	u16 pcie_devctl2;
	u32 pci_msix0;
	u32 pci_tph2;

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer serial number format
	 */
	u8 serial[SERIAL_MAX];
	/* human readable board version */
	u8 boardversion[BOARD_VERS_MAX];
	u8 lbus_info[32]; /* human readable localbus info */
	/* chip major rev, from CceRevision */
	u8 majrev;
	/* chip minor rev, from CceRevision */
	u8 minrev;
	/* hardware ID */
	u8 hfi1_id;
	/* implementation code */
	u8 icode;
	/* vAU of this device */
	u8 vau;
	/* vCU of this device */
	u8 vcu;
	/* link credits of this device */
	u16 link_credits;
	/* initial vl15 credits to use */
	u16 vl15_init;

	/*
	 * Cached value for vl15buf, read during verify cap interrupt. VL15
	 * credits are to be kept at 0 and set when handling the link-up
	 * interrupt. This removes the possibility of receiving VL15 MAD
	 * packets before this HFI is ready.
	 */
	u16 vl15buf_cached;

	/* Misc small ints */
	u8 n_krcv_queues;
	u8 qos_shift;

	u16 irev;	/* implementation revision */
	u32 dc8051_ver; /* 8051 firmware version */

	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
	struct platform_config platform_config;
	struct platform_config_cache pcfg_cache;

	struct diag_client *diag_client;

	/* MSI-X information */
	struct hfi1_msix_entry *msix_entries;
	u32 num_msix_entries;
	u32 first_dyn_msix_idx;

	/* INTx information */
	u32 requested_intx_irq;	/* did we request one? */

	/* general interrupt: mask of handled interrupts */
	u64 gi_mask[CCE_NUM_INT_CSRS];

	struct rcv_array_data rcv_entries;

	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;

	/*
	 * 64 bit synthetic counters
	 */
	struct timer_list synth_stats_timer;

	/*
	 * device counters
	 */
	char *cntrnames;
	size_t cntrnameslen;
	size_t ndevcntrs;
	u64 *cntrs;
	u64 *scntrs;

	/*
	 * remembered values for synthetic counters
	 */
	u64 last_tx;
	u64 last_rx;

	/*
	 * per-port counters
	 */
	size_t nportcntrs;
	char *portcntrnames;
	size_t portcntrnameslen;

	struct err_info_rcvport err_info_rcvport;
	struct err_info_constraint err_info_rcv_constraint;
	struct err_info_constraint err_info_xmit_constraint;

	atomic_t drop_packet;
	u8 do_drop;
	u8 err_info_uncorrectable;
	u8 err_info_fmconfig;

	/*
	 * Software counters for the status bits defined by the
	 * associated error status registers
	 */
	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

	/* Software counter that spans all contexts */
	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
	/* Software counter that spans all DMA engines */
	u64 sw_send_dma_eng_err_status_cnt[
		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
	/* Software counter that aggregates all cce_err_status errors */
	u64 sw_cce_err_status_aggregate;
	/* Software counter that aggregates all bypass packet rcv errors */
	u64 sw_rcv_bypass_packet_errors;
	/* receive interrupt function */
	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];

	/* Save the enabled LCB error bits */
	u64 lcb_err_en;

	/*
	 * Capability to have different send engines simply by changing a
	 * pointer value.
	 */
	send_routine process_pio_send ____cacheline_aligned_in_smp;
	send_routine process_dma_send;
	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
				u64 pbc, const void *from, size_t count);
	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
				     struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u64 pbc, u8 plen);
	/* hfi1_pportdata, points to array of (physical) port-specific
	 * data structs, indexed by pidx (0..n-1)
	 */
	struct hfi1_pportdata *pport;
	/* receive context data */
	struct hfi1_ctxtdata **rcd;
	u64 __percpu *int_counter;
	/* verbs tx opcode stats */
	struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
	/* device (not port) flags, basically device capabilities */
	u16 flags;
	/* Number of physical ports available */
	u8 num_pports;
	/* Lowest context number which can be used by user processes or VNIC */
	u8 first_dyn_alloc_ctxt;
	/* adding a new field here would make it part of this cacheline */

	/* seqlock for sc2vl */
	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
	u64 sc2vl[4];
	/* receive interrupt functions */
	rhf_rcv_function_ptr *rhf_rcv_function_map;
	u64 __percpu *rcv_limit;
	u16 rhf_offset; /* offset of RHF within receive header entry */
	/* adding a new field here would make it part of this cacheline */

	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
	u8 oui1;
	u8 oui2;
	u8 oui3;

	/* Timer and counter used to detect RcvBufOvflCnt changes */
	struct timer_list rcverr_timer;

	wait_queue_head_t event_queue;

	/* receive context tail dummy address */
	__le64 *rcvhdrtail_dummy_kvaddr;
	dma_addr_t rcvhdrtail_dummy_dma;

	u32 rcv_ovfl_cnt;
	/* Serialize ASPM enable/disable between multiple verbs contexts */
	spinlock_t aspm_lock;
	/* Number of verbs contexts which have disabled ASPM */
	atomic_t aspm_disabled_cnt;
	/* Keeps track of user space clients */
	atomic_t user_refcount;
	/* Used to wait for outstanding user space clients before dev removal */
	struct completion user_comp;

	bool eprom_available;	/* true if EPROM is available for this device */
	bool aspm_supported;	/* Does HW support ASPM */
	bool aspm_enabled;	/* ASPM state: enabled/disabled */
	struct rhashtable *sdma_rht;

	struct kobject kobj;

	/* vnic data */
	struct hfi1_vnic_data vnic;
};

static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
{
	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
}

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
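/*
 * Worked example (illustrative only): firmware version 1.27.0 packs as
 * dc8051_ver(1, 27, 0) == 0x011b00; dc8051_ver_maj() then recovers 1,
 * dc8051_ver_min() recovers 27 (0x1b), and dc8051_ver_patch() recovers 0.
 */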
/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_EAGER          1
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;

/* Private data for file operations */
struct hfi1_filedata {
	struct hfi1_devdata *dd;
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	u16 subctxt;
	/* for cpu affinity; -1 if none */
	int rec_cpu_num;
	u32 tid_n_pinned;
	struct mmu_rb_handler *handler;
	struct tid_rb_node **entry_to_rb;
	spinlock_t tid_lock; /* protect tid_[limit,used] counters */
	u32 tid_limit;
	u32 tid_used;
	u32 *invalid_tids;
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
	struct mm_struct *mm;
};

extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);

static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
{
	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		HFI1_MAX_SHARED_CTXTS;
}

int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);

extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
			 struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe);

void hfi1_make_ud_req_16B(struct rvt_qp *qp,
			  struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe);

/* receive packet handler dispositions */
#define RCV_PKT_OK      0x0 /* keep going */
#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */

/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
}

int hfi1_reset_device(int);

void receive_interrupt_work(struct work_struct *work);

/* extract service channel from header and rhf */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{
	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
}

#define HFI1_JKEY_WIDTH       16
#define HFI1_JKEY_MASK        (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
	else if (jkey < 64)
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);

	return jkey;
}
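/*
 * Worked examples (illustrative only) for the grouping above: uid 7
 * with CAP_SYS_ADMIN yields 7 & (HFI1_ADMIN_JKEY_RANGE - 1) == 7, i.e.
 * the admin range; uid 40 without CAP_SYS_ADMIN falls below 64, so bit
 * 15 is set and the J_KEY becomes 0x8028, keeping unprivileged users
 * out of the two reserved ranges.
 */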
/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{
	u16 link_speed = ppd->link_speed_active;
	u16 link_width = ppd->link_width_active;
	u32 egress_rate;

	if (link_speed == OPA_LINK_SPEED_25G)
		egress_rate = 25000;
	else /* assume OPA_LINK_SPEED_12_5G */
		egress_rate = 12500;

	switch (link_width) {
	case OPA_LINK_WIDTH_4X:
		egress_rate *= 4;
		break;
	case OPA_LINK_WIDTH_3X:
		egress_rate *= 3;
		break;
	case OPA_LINK_WIDTH_2X:
		egress_rate *= 2;
		break;
	default:
		/* assume IB_WIDTH_1X */
		break;
	}

	return egress_rate;
}

/*
 * egress_cycles
 *
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are (1/805 MHz).
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{
	u32 cycles;

	/*
	 * cycles is:
	 *
	 *          (length) [bits] / (rate) [bits/sec]
	 *  ---------------------------------------------------
	 *  fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
	 */

	cycles = len * 8; /* bits */
	cycles *= 805;
	cycles /= rate;

	return cycles;
}
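/*
 * Worked example (illustrative only): a 25G link at 4X gives
 * active_egress_rate() == 100000 [10^6 bits/sec], so egressing a
 * 4096-byte packet costs egress_cycles(4096, 100000)
 * == 4096 * 8 * 805 / 100000 == 263 fabric clock cycles,
 * i.e. roughly 327 ns at 805 MHz.
 */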
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh);
typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
				u8 sc5, const struct ib_grh *old_grh);

#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index);

#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{
	/* Pause at least 1us, to ensure chip returns all credits */
	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;

	udelay(usec ? usec : 1);
}

/**
 * sc_to_vlt() - reverse lookup sc to vl
 * @dd: devdata
 * @sc5: 5 bit sc
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)(0xff);

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}

#define PKEY_MEMBER_MASK 0x8000
#define PKEY_LOW_15_MASK 0x7fff

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
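/*
 * Worked example (illustrative only): against a full-member table entry
 * 0x8001, both a full-member pkey 0x8001 and a limited-member pkey
 * 0x0001 match (return 1); but a limited-member pkey 0x0001 against a
 * limited-member entry 0x0001 returns 0, since two limited members of a
 * partition may not communicate.
 */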
/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}

/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
				    u32 slid)
{
	struct hfi1_devdata *dd = ppd->dd;

	incr_cntr64(&ppd->port_rcv_constraint_errors);
	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_rcv_constraint.slid = slid;
		dd->err_info_rcv_constraint.pkey = pkey;
	}
}

/*
 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
 * is a hint as to the best place in the partition key table to begin
 * searching. This function should not be called on the data path, for
 * performance reasons. On the data path, the pkey check is expected to
 * be done by HW; call rcv_pkey_check() instead.
 */
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				     u8 sc5, u8 idx, u32 slid, bool force)
{
	if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index 'idx' */
	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
		return 0;

	/* no match - try the whole table */
	if (!ingress_pkey_table_search(ppd, pkey))
		return 0;

bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}

/*
 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. It only ensures the pkey is valid for QP0. This function
 * should be called on the data path instead of ingress_pkey_check,
 * since on the data path the pkey check is done by HW (except for QP0).
 */
static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				 u8 sc5, u16 slid)
{
	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	return 0;
bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}

/* MTU handling */

/* MTU enumeration, 256-4k match IB */
#define OPA_MTU_0     0
#define OPA_MTU_256   1
#define OPA_MTU_512   2
#define OPA_MTU_1024  3
#define OPA_MTU_2048  4
#define OPA_MTU_4096  5

u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
u16 enum_to_mtu(int mtu);
static inline int valid_ib_mtu(unsigned int mtu)
{
	return mtu == 256 || mtu == 512 ||
		mtu == 1024 || mtu == 2048 ||
		mtu == 4096;
}

static inline int valid_opa_max_mtu(unsigned int mtu)
{
	return mtu >= 2048 &&
		(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
}
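/*
 * Illustrative only: valid_opa_max_mtu() accepts the IB sizes from 2048
 * up plus the OPA jumbo sizes, so 2048, 4096, 8192 and 10240 pass,
 * while 1024 (too small for a maximum MTU) and 9000 (not an OPA size)
 * fail.
 */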

int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);

static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
	return ppd->dd;
}

static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{
	return container_of(dev, struct hfi1_devdata, verbs_dev);
}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{
	return container_of(ibp, struct hfi1_pportdata, ibport_data);
}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB numbers ports from 1, hardware from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}

static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
{
	return &rcd->ppd->ibport_data;
}

void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp);
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp)
{
	bool becn;
	bool fecn;

	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		fecn = ib_bth_get_fecn(pkt->ohdr);
		becn = ib_bth_get_becn(pkt->ohdr);
	}
	if (unlikely(fecn || becn)) {
		hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
		return fecn;
	}
	return false;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 ret;

	if (index >= ARRAY_SIZE(ppd->pkeys))
		ret = 0;
	else
		ret = ppd->pkeys[index];

	return ret;
}

/*
 * Return the indexed GUID from the port GUIDs table.
 */
static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
	return cpu_to_be64(ppd->guids[index]);
}

/*
 * Called by readers of cc_state only, must call under rcu_read_lock().
 */
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
	return rcu_dereference(ppd->cc_state);
}
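
/*
 * Illustrative sketch (not a quote from the driver): the intended
 * read-side pattern for cc_state. The pointer may be NULL while
 * congestion control setup is still in flight, so readers must check:
 *
 *	rcu_read_lock();
 *	cc_state = get_cc_state(ppd);
 *	if (cc_state)
 *		(read fields of *cc_state)
 *	rcu_read_unlock();
 */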

/*
 * Called by writers of cc_state only, must call under cc_state_lock.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
	return rcu_dereference_protected(ppd->cc_state,
					 lockdep_is_held(&ppd->cc_state_lock));
}

/*
 * values for dd->flags (_device_ related flags)
 */
#define HFI1_INITTED		0x1	/* chip and driver up and initted */
#define HFI1_PRESENT		0x2	/* chip accesses can be done */
#define HFI1_FROZEN		0x4	/* chip in SPC freeze */
#define HFI1_HAS_SDMA_TIMEOUT	0x8
#define HFI1_HAS_SEND_DMA	0x10	/* Supports Send DMA */
#define HFI1_FORCED_FREEZE	0x80	/* driver forced freeze mode */

/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)

/* ctxt_flag bit offsets */
		/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT 1
		/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED 2
		/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV 3
		/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG 4

/* device data allocation, initialization, and teardown */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent);
void hfi1_free_devdata(struct hfi1_devdata *dd);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);

/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff);
void shutdown_led_override(struct hfi1_pportdata *ppd);

#define HFI1_CREDIT_RETURN_RATE (100)

/*
 * The number of words for the KDETH protocol field. If this is
 * larger than the actual field used, then part of the payload
 * will be in the header.
 *
 * Optimally, we want this sized so that a typical case will
 * use full cache lines. The typical local KDETH header would
 * be:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 28 + KDETH
 *
 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
 */
#define DEFAULT_RCVHDRSIZE 9

/*
 * Maximal header byte count:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 40	GRH (optional)
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 68 + KDETH
 *
 * We also want to maintain a cache line alignment to assist DMA'ing
 * of the header bytes. Round up to a good size.
 */
#define DEFAULT_RCVHDR_ENTSIZE 32

bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages);
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
			    size_t npages, bool writable, struct page **pages);
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty);

static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
}

static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, the routine
	 * is inlined, and we don't want register caching or reordering.
	 */
	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
}
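
/*
 * Illustrative sketch (not from the driver): with DMA tail updates
 * enabled (the DMA_RTAIL capability), the chip DMAs the tail index into
 * host memory and software can poll for new headers, assuming the
 * context's software head index is tracked in rcd->head:
 *
 *	u32 tail = get_rcvhdrtail(rcd);
 *	while (rcd->head != tail)
 *		(process the header at rcd->head, then advance rcd->head)
 */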

/*
 * sysfs interface.
 */

extern const char ib_hfi1_version[];

int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);

int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
			   struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);

int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd);
int pcie_speeds(struct hfi1_devdata *dd);
int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len);

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);

/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 */
static inline void flush_wc(void)
{
	asm volatile("sfence" : : : "memory");
}
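
/*
 * Illustrative sketch (an assumption about intended use, not a quote
 * from the driver): flush_wc() is meant to order copies into a
 * write-combining PIO mapping before telling the chip about them:
 *
 *	memcpy_toio(piobuf, hdr, hdrbytes);   -- fill the WC buffer
 *	flush_wc();                           -- drain WC buffers, fence
 *	(then ring the hardware)
 */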

void handle_eflags(struct hfi1_packet *packet);
int process_receive_ib(struct hfi1_packet *packet);
int process_receive_bypass(struct hfi1_packet *packet);
int process_receive_error(struct hfi1_packet *packet);
int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);

/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
extern uint rcv_intr_count;
extern uint rcv_intr_dynamic;
extern ushort link_crc_mask;

extern struct mutex hfi1_mutex;

/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60

#define DRIVER_NAME		"hfi1"
#define HFI1_USER_MINOR_BASE	0
#define HFI1_TRACE_MINOR	127
#define HFI1_NMINORS		255

#define PCI_VENDOR_ID_INTEL	0x8086
#define PCI_DEVICE_ID_INTEL0	0x24f0
#define PCI_DEVICE_ID_INTEL1	0x24f1

#define HFI1_PKT_USER_SC_INTEGRITY \
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)

#define HFI1_PKT_KERNEL_SC_INTEGRITY \
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)

static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
						  u16 ctxt_type)
{
	u64 base_sc_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sc_integrity =
	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (ctxt_type == SC_USER)
		base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
	else
		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sc_integrity;
}
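
/*
 * Illustrative sketch (not a quote from the driver): the mask computed
 * above is the sort of value programmed into a send context's
 * SEND_CTXT_CHECK_ENABLE CSR; 'hw_context' below is a hypothetical
 * hardware context index:
 *
 *	u64 reg = hfi1_pkt_default_send_ctxt_mask(dd, SC_USER);
 *	write_kctxt_csr(dd, hw_context, SEND_CTXT_CHECK_ENABLE, reg);
 */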

static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
	u64 base_sdma_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sdma_integrity =
	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sdma_integrity;
}

/*
 * hfi1_early_err is used (only!) to print early errors before devdata is
 * allocated, or when dd->pcidev may not be valid, and at the tail end of
 * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
 * the same as dd_dev_err, but is used when the message really needs
 * the IB port# to be definitive as to what's happening.
 */
#define hfi1_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define hfi1_early_info(dev, fmt, ...) \
	dev_info(dev, fmt, ##__VA_ARGS__)

#define dd_dev_emerg(dd, fmt, ...) \
	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_err_ratelimited(dd, fmt, ...) \
	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			    rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			    ##__VA_ARGS__)

#define dd_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_warn_ratelimited(dd, fmt, ...) \
	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			     ##__VA_ARGS__)

#define dd_dev_info(dd, fmt, ...) \
	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_info_ratelimited(dd, fmt, ...) \
	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
			     ##__VA_ARGS__)

#define dd_dev_dbg(dd, fmt, ...) \
	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define hfi1_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), \
		##__VA_ARGS__)

/*
 * this is used for formatting hw error messages...
 */
struct hfi1_hwerror_msgs {
	u64 mask;
	const char *msg;
	size_t sz;
};
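
/*
 * Illustrative sketch (hypothetical mask bits, not the chip's): a table
 * of this type is walked by hfi1_format_hwerrors() below, appending the
 * msg text for each set bit:
 *
 *	static const struct hfi1_hwerror_msgs msgs[] = {
 *		{ .mask = BIT_ULL(0), .msg = "PCIe parity" },
 *		{ .mask = BIT_ULL(1), .msg = "internal RAM parity" },
 *	};
 *	char buf[96];
 *
 *	hfi1_format_hwerrors(hwerrs, msgs, ARRAY_SIZE(msgs),
 *			     buf, sizeof(buf));
 */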

/* in intr.c... */
void hfi1_format_hwerrors(u64 hwerrs,
			  const struct hfi1_hwerror_msgs *hwerrmsgs,
			  size_t nhwerrmsgs, char *msg, size_t lmsg);

#define USER_OPCODE_CHECK_VAL		0xC0
#define USER_OPCODE_CHECK_MASK		0xC0
#define OPCODE_CHECK_VAL_DISABLED	0x0
#define OPCODE_CHECK_MASK_DISABLED	0x0

static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.z_rc_acks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
		ppd->ibport_data.rvp.z_rc_qacks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
	}
}

/* Control LED state */
static inline void setextled(struct hfi1_devdata *dd, u32 on)
{
	if (on)
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
	else
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
}

/* return the i2c resource given the target */
static inline u32 i2c_target(u32 target)
{
	return target ? CR_I2C2 : CR_I2C1;
}

/* return the i2c chain chip resource that this HFI uses for QSFP */
static inline u32 qsfp_resource(struct hfi1_devdata *dd)
{
	return i2c_target(dd->hfi1_id);
}

/* Is this device integrated or discrete? */
static inline bool is_integrated(struct hfi1_devdata *dd)
{
	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);

#define DD_DEV_ENTRY(dd)	__string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd)	__assign_str(dev, dev_name(&(dd)->pcidev->dev))
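
/*
 * Illustrative sketch: DD_DEV_ENTRY()/DD_DEV_ASSIGN() are intended for
 * TRACE_EVENT() definitions in the driver's trace headers, roughly:
 *
 *	TRACE_EVENT(hfi1_example,
 *		    TP_PROTO(struct hfi1_devdata *dd),
 *		    TP_ARGS(dd),
 *		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)),
 *		    TP_fast_assign(DD_DEV_ASSIGN(dd)),
 *		    TP_printk("[%s]", __get_str(dev)));
 *
 * "hfi1_example" is a made-up event name for illustration only.
 */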

static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
				       struct rdma_ah_attr *attr)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid = rdma_ah_get_dlid(attr);

	/*
	 * Kernel clients may not have set up GRH information; set
	 * that here.
	 */
	ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
	ppd = ppd_from_ibp(ibp);
	if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
	      (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
	     (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
	     (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
	     (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
	    (rdma_ah_get_make_grd(attr))) {
		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
		rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
		rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
	}
}

/*
 * hfi1_check_mcast - Check if the given lid is
 * in the OPA multicast range.
 *
 * The LID might either reside in ah.dlid or might be
 * in the GRH of the address handle as DGID if extended
 * addresses are in use.
 */
static inline bool hfi1_check_mcast(u32 lid)
{
	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
}

#define opa_get_lid(lid, format) \
	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)

/* Convert a lid to a specific lid space */
static inline u32 __opa_get_lid(u32 lid, u8 format)
{
	bool is_mcast = hfi1_check_mcast(lid);

	switch (format) {
	case OPA_PORT_PACKET_FORMAT_8B:
	case OPA_PORT_PACKET_FORMAT_10B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF0000);
		return lid & 0xFFFFF;
	case OPA_PORT_PACKET_FORMAT_16B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF00000);
		return lid & 0xFFFFFF;
	case OPA_PORT_PACKET_FORMAT_9B:
		if (is_mcast)
			return (lid -
				opa_get_mcast_base(OPA_MCAST_NR) +
				be16_to_cpu(IB_MULTICAST_LID_BASE));
		else
			return lid & 0xFFFF;
	default:
		return lid;
	}
}

/* Return true if the given lid is in the OPA 16B multicast range */
static inline bool hfi1_is_16B_mcast(u32 lid)
{
	return ((lid >=
		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
}

static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
	u32 dlid = rdma_ah_get_dlid(attr);

	/* Modify ah_attr.dlid to be in the 32 bit LID space.
	 * This is how the address will be laid out:
	 * Assuming MCAST_NR to be 4,
	 * 32 bit permissive LID = 0xFFFFFFFF
	 * Multicast LID range = 0xFFFFFFFE to 0xF0000000
	 * Unicast LID range = 0xEFFFFFFF to 1
	 * Invalid LID = 0
	 */
	if (ib_is_opa_gid(&grh->dgid))
		dlid = opa_get_lid_from_gid(&grh->dgid);
	else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
		 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
		dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
			opa_get_mcast_base(OPA_MCAST_NR);
	else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
		dlid = be32_to_cpu(OPA_LID_PERMISSIVE);

	rdma_ah_set_dlid(attr, dlid);
}

static inline u8 hfi1_get_packet_type(u32 lid)
{
	/* 9B if lid >= 0xF0000000 */
	if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
		return HFI1_PKT_TYPE_9B;

	/* 16B if lid >= 0xC000 */
	if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
		return HFI1_PKT_TYPE_16B;

	return HFI1_PKT_TYPE_9B;
}
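
/*
 * Illustrative conversions, assuming OPA_MCAST_NR is 4 so the multicast
 * base is 0xF0000000 (see hfi1_make_opa_lid() above):
 *
 *	opa_get_lid(0x1001, 9B)        0x1001   (unicast, low 16 bits kept)
 *	opa_get_lid(0xF0000005, 9B)    0xC005   (rebased to the IB mcast range)
 *	opa_get_lid(0xF0000005, 16B)   0xF00005 (rebased to the 16B mcast range)
 *	hfi1_get_packet_type(0x10000)  HFI1_PKT_TYPE_16B (LID too big for 9B)
 */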
2366 */ 2367 if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B) 2368 return HFI1_PKT_TYPE_16B; 2369 2370 return hfi1_get_packet_type(lid); 2371 } 2372 2373 static inline void hfi1_make_ext_grh(struct hfi1_packet *packet, 2374 struct ib_grh *grh, u32 slid, 2375 u32 dlid) 2376 { 2377 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data; 2378 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 2379 2380 if (!ibp) 2381 return; 2382 2383 grh->hop_limit = 1; 2384 grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; 2385 if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)) 2386 grh->sgid.global.interface_id = 2387 OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE)); 2388 else 2389 grh->sgid.global.interface_id = OPA_MAKE_ID(slid); 2390 2391 /* 2392 * Upper layers (like mad) may compare the dgid in the 2393 * wc that is obtained here with the sgid_index in 2394 * the wr. Since sgid_index in wr is always 0 for 2395 * extended lids, set the dgid here to the default 2396 * IB gid. 2397 */ 2398 grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix; 2399 grh->dgid.global.interface_id = 2400 cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]); 2401 } 2402 2403 static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload) 2404 { 2405 return -(hdr_size + payload + (SIZE_OF_CRC << 2) + 2406 SIZE_OF_LT) & 0x7; 2407 } 2408 2409 static inline void hfi1_make_ib_hdr(struct ib_header *hdr, 2410 u16 lrh0, u16 len, 2411 u16 dlid, u16 slid) 2412 { 2413 hdr->lrh[0] = cpu_to_be16(lrh0); 2414 hdr->lrh[1] = cpu_to_be16(dlid); 2415 hdr->lrh[2] = cpu_to_be16(len); 2416 hdr->lrh[3] = cpu_to_be16(slid); 2417 } 2418 2419 static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr, 2420 u32 slid, u32 dlid, 2421 u16 len, u16 pkey, 2422 bool becn, bool fecn, u8 l4, 2423 u8 sc) 2424 { 2425 u32 lrh0 = 0; 2426 u32 lrh1 = 0x40000000; 2427 u32 lrh2 = 0; 2428 u32 lrh3 = 0; 2429 2430 lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT); 2431 lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT); 2432 lrh0 = (lrh0 & ~OPA_16B_LID_MASK) | (slid & OPA_16B_LID_MASK); 2433 lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT); 2434 lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT); 2435 lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK); 2436 lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) | 2437 ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT); 2438 lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) | 2439 ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT); 2440 lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT); 2441 lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4; 2442 2443 hdr->lrh[0] = lrh0; 2444 hdr->lrh[1] = lrh1; 2445 hdr->lrh[2] = lrh2; 2446 hdr->lrh[3] = lrh3; 2447 } 2448 #endif /* _HFI1_KERNEL_H */ 2449