/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M       - The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is
 *		  physically contiguous (although various IO spaces may punch
 *		  holes in it).
 *
 *	N       - Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID   - network ID of a router, Mbrick or Cbrick. NASID values of
 *		  routers always have the low bit set to 1; C/M bricks have the
 *		  low bit equal to 0. Most addressing macros that target UV hub
 *		  chips right shift the NASID by 1 to exclude the always-zero
 *		  bit. NASIDs contain up to 15 bits.
 *
 *	GNODE   - NASID right shifted by 1 bit. Most MMRs contain gnodes
 *		  instead of nasids.
 *
 *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *	GPA     - (global physical address) a socket physical address converted
 *		  so that it can be used by the GRU as a global address. Socket
 *		  physical addresses 1) need additional NASID (node) bits added
 *		  to the high end of the address, and 2) need to be unaliased if
 *		  the partition does not have a physical address 0. In addition,
 *		  on UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|          GNODE         |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<------ 53 - M bits --->|<------ M bits ----->|
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   |<-- N bits --->|<------ M bits ----->|
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *	Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *	The actual values are configuration dependent and are set at
 *	boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *	1111110000000000
 *	5432109876543210
 *	pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *	ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *	pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *	sssssssssss
 *
 *		p = pnode bits
 *		l = socket number on board
 *		c = core
 *		h = hyperthread
 *		s = bits that are in the SOCKET_ID CSR
 *
 *	Note: Processor may support fewer bits in the APICID register. The ACPI
 *	      tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
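
/*
 * Worked example of the terminology above (hypothetical values, added for
 * illustration only): a C brick with NASID 6 has GNODE 3 (the always-zero
 * low bit is shifted off).  With pnode_mask == 0x3f and gnode_extra == 0,
 * UV_NASID_TO_PNODE(6) (defined below) yields PNODE 3, and
 * UV_PNODE_TO_NASID(3) shifts back to recover NASID 6.
 */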

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

struct uv_scir_s {
	struct timer_list timer;
	unsigned long	offset;
	unsigned long	last;
	unsigned long	idle_on;
	unsigned long	idle_off;
	unsigned char	state;
	unsigned char	enabled;
};

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in the per-cpu data areas of each cpu.
 * They are kept together in a struct to minimize cache misses.
 */
struct uv_hub_info_s {
	unsigned long		global_mmr_base;
	unsigned long		gpa_mask;
	unsigned int		gnode_extra;
	unsigned char		hub_revision;
	unsigned char		apic_pnode_shift;
	unsigned char		m_shift;
	unsigned char		n_lshift;
	unsigned long		gnode_upper;
	unsigned long		lowmem_remap_top;
	unsigned long		lowmem_remap_base;
	unsigned short		pnode;
	unsigned short		pnode_mask;
	unsigned short		coherency_domain_number;
	unsigned short		numa_blade_id;
	unsigned char		blade_processor_id;
	unsigned char		m_val;
	unsigned char		n_val;
	struct uv_scir_s	scir;
};

DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
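
/*
 * Illustrative usage (not part of the original header): hub attributes are
 * normally read through the accessors above, e.g.
 *
 *	int pnode = uv_cpu_hub_info(cpu)->pnode;
 *	int mval  = uv_hub_info->m_val;
 */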

/*
 * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
 * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV1_HUB_REVISION_BASE		1
#define UV2_HUB_REVISION_BASE		3
#define UV3_HUB_REVISION_BASE		5

static inline int is_uv1_hub(void)
{
	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
}

static inline int is_uv2_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
}

static inline int is_uv3_hub(void)
{
	return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
}

static inline int is_uv_hub(void)
{
	return uv_hub_info->hub_revision;
}

/* code common to uv2 and uv3 only */
static inline int is_uvx_hub(void)
{
	return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
}

static inline int is_uv2_1_hub(void)
{
	return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
}

static inline int is_uv2_2_hub(void)
{
	return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
}
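
/*
 * Illustrative example (not part of the original header): generation
 * specific code typically keys off the helpers above, e.g.
 *
 *	if (is_uv1_hub())
 *		uv1_init_foo();		(hypothetical function)
 *	else if (is_uvx_hub())
 *		uvx_init_foo();		(hypothetical function)
 */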

union uvh_apicid {
	unsigned long	v;
	struct uvh_apicid_s {
		unsigned long	local_apic_mask  : 24;
		unsigned long	local_apic_shift :  5;
		unsigned long	unused1          :  3;
		unsigned long	pnode_mask       : 24;
		unsigned long	pnode_shift      :  5;
		unsigned long	unused2          :  3;
	} s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)		((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)		(UV_PNODE_TO_GNODE(p) << 1)

#define UV1_LOCAL_MMR_BASE		0xf4000000UL
#define UV1_GLOBAL_MMR32_BASE		0xf8000000UL
#define UV1_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
#define UV1_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE	(is_uv1_hub() ? UV1_LOCAL_MMR_BASE :	\
				(is_uv2_hub() ? UV2_LOCAL_MMR_BASE :	\
						UV3_LOCAL_MMR_BASE))
#define UV_GLOBAL_MMR32_BASE	(is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :	\
				(is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :	\
						UV3_GLOBAL_MMR32_BASE))
#define UV_LOCAL_MMR_SIZE	(is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :	\
				(is_uv2_hub() ? UV2_LOCAL_MMR_SIZE :	\
						UV3_LOCAL_MMR_SIZE))
#define UV_GLOBAL_MMR32_SIZE	(is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :	\
				(is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :	\
						UV3_GLOBAL_MMR32_SIZE))
#define UV_GLOBAL_MMR64_BASE	(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define UV_GLOBAL_MMR64_PNODE_SHIFT	26

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

#define UV_APICID_HIBIT_MASK	0xffff0000

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system. This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node. With Nehalem cpus (2 sockets per node,
 * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
 * a node.
 *
 * The window is located at the top of ACPI MMR space.
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				 LOCAL_BUS_SIZE - \
				 SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;
	paddr |= uv_hub_info->gnode_upper;
	paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
		((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
	return paddr;
}


/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}
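
/*
 * Illustrative example (not part of the original header): hand a kernel
 * buffer to GRU-style hardware that expects a global address:
 *
 *	void *buf = kmalloc(256, GFP_KERNEL);
 *	unsigned long gpa = uv_gpa(buf);
 *
 * uv_gpa_to_soc_phys_ram() below converts the result back to a socket
 * physical address.
 */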

/* Top two bits indicate the requested address is in MMR space. */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top = uv_hub_info->lowmem_remap_top;

	gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
		((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}


/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	return gpa >> uv_hub_info->n_lshift;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	unsigned long n_mask = (1UL << uv_hub_info->n_val) - 1;

	return uv_gpa_to_gnode(gpa) & n_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
}


/*
 * Extract a PNODE from an APICID (full apicid, not processor subset)
 */
static inline int uv_apicid_to_pnode(int apicid)
{
	return (apicid >> uv_hub_info->apic_pnode_shift);
}

/*
 * Convert an apicid to the socket number on the blade
 */
static inline int uv_apicid_to_socket(int apicid)
{
	if (is_uv1_hub())
		return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
	else
		return 0;
}

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		    UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}
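
/*
 * Illustrative example (not part of the original header): read a remote
 * hub's copy of the APICID MMR through the 64-bit global space:
 *
 *	unsigned long v = uv_read_global_mmr64(pnode, UVH_APICID);
 */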

/*
 * Global MMR space addresses when referenced by the GRU. (The GRU does
 * NOT use socket addressing).
 */
static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
{
	return UV_GLOBAL_GRU_MMR_BASE | offset |
		((unsigned long)pnode << uv_hub_info->m_val);
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}

/*
 * Structures and definitions for converting between cpu, node, pnode, and blade
 * numbers.
 */
struct uv_blade_info {
	unsigned short	nr_possible_cpus;
	unsigned short	nr_online_cpus;
	unsigned short	pnode;
	short		memory_nid;
	spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
	unsigned long	nmi_count;	/* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
extern short *uv_cpu_to_blade;
extern short uv_possible_blades;

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_hub_info->blade_processor_id;
}

/* Blade number of current cpu. Numbered 0 .. <# blades - 1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_cpu_to_blade[cpu];
}

/* Convert a linux node number to the UV blade number */
static inline int uv_node_to_blade_id(int nid)
{
	return uv_node_to_blade[nid];
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return uv_blade_info[bid].pnode;
}
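
/*
 * Illustrative example (not part of the original header): walk every
 * possible blade and report the pnode backing it:
 *
 *	int bid;
 *
 *	for_each_possible_blade(bid)
 *		pr_info("blade %d -> pnode %d\n", bid, uv_blade_to_pnode(bid));
 */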

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_blade_info[bid].memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_blade_info[bid].nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_blade_info[bid].nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
}

/* Maximum possible number of blades */
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}

/* Per Hub NMI support */
extern void uv_nmi_setup(void);

/* BMC sets a bit in this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR		UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR	UVH_SCRATCH5_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"

/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX		UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR	UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT	(is_uv1_hub() ? \
					UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
					UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
#define UVH_NMI_MMRX_TYPE	"EXTIO_INT0"

/* Non-zero indicates newer SMM NMI handler present */
#define UVH_NMI_MMRX_SUPPORTED	UVH_EXTIO_INT0_BROADCAST

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
#define UVH_NMI_MMRX_REQ	UVH_SCRATCH5_ALIAS_2
#define UVH_NMI_MMRX_REQ_SHIFT	62

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	atomic_t		state;
	atomic_t		pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
#define uv_cpu_nmi			(__get_cpu_var(__uv_cpu_nmi))
#define uv_hub_nmi			(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)		(per_cpu(__uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define UV_NMI_STATE_OUT		0
#define UV_NMI_STATE_IN			1
#define UV_NMI_STATE_DUMP		2
#define UV_NMI_STATE_DUMP_DONE		3

/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
	if (uv_hub_info->scir.state != value) {
		uv_hub_info->scir.state = value;
		uv_write_local_mmr8(uv_hub_info->scir.offset, value);
	}
}

static inline unsigned long uv_scir_offset(int apicid)
{
	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
}

static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
	if (uv_cpu_hub_info(cpu)->scir.state != value) {
		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
				uv_cpu_hub_info(cpu)->scir.offset, value);
		uv_cpu_hub_info(cpu)->scir.state = value;
	}
}
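
/*
 * Illustrative example (not part of the original header): a periodic
 * heartbeat handler might mark the current cpu as active in its SCIR byte
 * with something like:
 *
 *	uv_set_scir_bits(SCIR_CPU_HEARTBEAT | SCIR_CPU_ACTIVITY);
 */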

extern unsigned int uv_apicid_hibits;
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
	apicid |= uv_apicid_hibits;
	return (1UL << UVH_IPI_INT_SEND_SHFT) |
			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
			(vector << UVH_IPI_INT_VECTOR_SHFT);
}

static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
	unsigned long val;
	unsigned long dmode = dest_Fixed;

	if (vector == NMI_VECTOR)
		dmode = dest_NMI;

	val = uv_hub_ipi_value(apicid, vector, dmode);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/*
 * Get the minimum revision number of the hub chips within the partition.
 *	1 - UV1 rev 1.0 initial silicon
 *	2 - UV1 rev 2.0 production silicon
 *	3 - UV2 rev 1.0 initial silicon
 *	5 - UV3 rev 1.0 initial silicon
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */