/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M       - The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is physically
 *		  contiguous (although various IO spaces may punch holes in it).
 *
 *	N	- Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have the low bit set to 1; C/MBricks have the
 *		  low bit equal to 0. Most addressing macros that target UV hub
 *		  chips right shift the NASID by 1 to exclude the always-zero bit.
 *		  NASIDs contain up to 15 bits.
 *
 *	GNODE   - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
 *		  of nasids.
 *
 *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *	GPA	- (global physical address) a socket physical address converted
 *		  so that it can be used by the GRU as a global address. Socket
 *		  physical addresses 1) need additional NASID (node) bits added
 *		  to the high end of the address, and 2) need to be unaliased if
 *		  the partition does not have a physical address 0. In addition,
 *		  on UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|          GNODE         |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<-------53 - M bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   <--- N bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *	Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *	The actual values are configuration dependent and are set at
 *	boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *		1111110000000000
 *		5432109876543210
 *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *		sssssssssss
 *
 *			p = pnode bits
 *			l = socket number on board
 *			c = core
 *			h = hyperthread
 *			s = bits that are in the SOCKET_ID CSR
 *
 *	Note: Processor may support fewer bits in the APICID register. The ACPI
 *	      tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
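
/*
 * Worked example of the terminology above (purely illustrative; the actual
 * M, N and gnode_extra values are configuration dependent and discovered
 * from the hardware/BIOS at boot):
 *
 *	With M = 37 and N = 10, a hub (C/M brick) with NASID 0x6 has
 *	GNODE = 0x6 >> 1 = 0x3 and PNODE = GNODE & ((1 << N) - 1) = 0x3.
 *	Ignoring low-memory unaliasing and gnode_extra, a socket physical
 *	address 0x12345678 on that node becomes the global physical address
 *	(GNODE << M) | 0x12345678.
 */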

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/* System Controller Interface Reg info */
struct uv_scir_s {
	struct timer_list	timer;
	unsigned long		offset;
	unsigned long		last;
	unsigned long		idle_on;
	unsigned long		idle_off;
	unsigned char		state;
	unsigned char		enabled;
};

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
	u32	limit;		/* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
	u16	nasid;		/* node's global physical address */
	s8	base;		/* entry index of node's base addr */
	u8	reserved;
};
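
/*
 * Illustrative reading of the table above (the search itself is implemented
 * by uv_gam_range() further down; entries are built from BIOS-provided GAM
 * ranges at boot):
 *
 *	limit holds physical address bits 56:26, so a global address gpa is
 *	matched against the table by comparing (gpa & gpa_mask) >> 26 with
 *	each entry's limit; the first entry whose limit is larger describes
 *	the node owning that address, and that entry's nasid field names the
 *	owning node.
 */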

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
	unsigned long		global_mmr_base;
	unsigned long		global_mmr_shift;
	unsigned long		gpa_mask;
	unsigned short		*socket_to_node;
	unsigned short		*socket_to_pnode;
	unsigned short		*pnode_to_socket;
	struct uv_gam_range_s	*gr_table;
	unsigned short		min_socket;
	unsigned short		min_pnode;
	unsigned char		m_val;
	unsigned char		n_val;
	unsigned char		gr_table_len;
	unsigned char		hub_revision;
	unsigned char		apic_pnode_shift;
	unsigned char		gpa_shift;
	unsigned char		m_shift;
	unsigned char		n_lshift;
	unsigned int		gnode_extra;
	unsigned long		gnode_upper;
	unsigned long		lowmem_remap_top;
	unsigned long		lowmem_remap_base;
	unsigned long		global_gru_base;
	unsigned long		global_gru_shift;
	unsigned short		pnode;
	unsigned short		pnode_mask;
	unsigned short		coherency_domain_number;
	unsigned short		numa_blade_id;
	unsigned short		nr_possible_cpus;
	unsigned short		nr_online_cpus;
	short			memory_nid;
};

/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
	void			*p_uv_hub_info;
	unsigned char		blade_cpu_id;
	struct uv_scir_s	scir;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))

#define uv_scir_info		(&uv_cpu_info->scir)
#define uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define uv_hub_info	_uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}

#define	UV_HUB_INFO_VERSION	0x7150
extern int uv_hub_info_version(void);
static inline int uv_hub_info_check(int version)
{
	if (uv_hub_info_version() == version)
		return 0;

	pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
		uv_hub_info_version(), version);

	BUG();	/* Catastrophic - cannot continue on unknown UV system */
}
#define	_uv_hub_info_check()	uv_hub_info_check(UV_HUB_INFO_VERSION)
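
/*
 * Summary of the accessors above (illustrative only, no new interfaces):
 *
 *	uv_hub_info		- hub info struct of the current cpu's node
 *	uv_cpu_hub_info(cpu)	- hub info struct of the node owning 'cpu'
 *	uv_hub_info_list(node)	- hub info struct for an explicit node
 *
 *	e.g.	int pnode = uv_cpu_hub_info(cpu)->pnode;
 */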

/*
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV1_HUB_REVISION_BASE		1
#define UV2_HUB_REVISION_BASE		3
#define UV3_HUB_REVISION_BASE		5
#define UV4_HUB_REVISION_BASE		7

#ifdef	UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
{
	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
}
#else
static inline int is_uv1_hub(void)
{
	return 0;
}
#endif

#ifdef	UV2_HUB_IS_SUPPORTED
static inline int is_uv2_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
}
#else
static inline int is_uv2_hub(void)
{
	return 0;
}
#endif

#ifdef	UV3_HUB_IS_SUPPORTED
static inline int is_uv3_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
}
#else
static inline int is_uv3_hub(void)
{
	return 0;
}
#endif

#ifdef	UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
	return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
}
#else
static inline int is_uv4_hub(void)
{
	return 0;
}
#endif

static inline int is_uvx_hub(void)
{
	if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
		return uv_hub_info->hub_revision;

	return 0;
}

static inline int is_uv_hub(void)
{
#ifdef	UV1_HUB_IS_SUPPORTED
	return uv_hub_info->hub_revision;
#endif
	return is_uvx_hub();
}

union uvh_apicid {
	unsigned long	v;
	struct uvh_apicid_s {
		unsigned long	local_apic_mask  : 24;
		unsigned long	local_apic_shift :  5;
		unsigned long	unused1          :  3;
		unsigned long	pnode_mask       : 24;
		unsigned long	pnode_shift      :  5;
		unsigned long	unused2          :  3;
	} s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)		((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)		(UV_PNODE_TO_GNODE(p) << 1)
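
/*
 * For example (values purely illustrative; pnode_mask and gnode_extra are
 * configuration dependent): with pnode_mask == 0x3ff and gnode_extra == 0,
 * NASID 0x6 gives UV_NASID_TO_PNODE(0x6) == 0x3, and converting back,
 * UV_PNODE_TO_NASID(0x3) == 0x6 (the always-zero low bit is restored).
 */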

#define UV1_LOCAL_MMR_BASE		0xf4000000UL
#define UV1_GLOBAL_MMR32_BASE		0xf8000000UL
#define UV1_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
#define UV1_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV4_LOCAL_MMR_BASE		0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE		(16UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE		(				\
				is_uv1_hub() ? UV1_LOCAL_MMR_BASE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_BASE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_BASE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)

#define UV_GLOBAL_MMR32_BASE		(				\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)

#define UV_LOCAL_MMR_SIZE		(				\
				is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_SIZE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_SIZE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)

#define UV_GLOBAL_MMR32_SIZE		(				\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)

#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

#define UV_APICID_HIBIT_MASK	0xffff0000

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system.  This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node.  With Nehalem cpus (2 sockets per node,
 * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
 * a node.
 *
 * The window is located at top of ACPI MMR space
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				 LOCAL_BUS_SIZE - \
				 SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */
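
/*
 * The SCIR window therefore occupies the last SCIR_WINDOW_COUNT bytes of
 * the local bus window, one byte-sized register per slot (see
 * uv_scir_offset() below):
 *
 *	SCIR_LOCAL_MMR_BASE = 0x1c00000 + 0x400000 - 64 = 0x1ffffc0
 */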

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
	return uv_hub_info->gpa_shift;
}
#define	_uv_gpa_shift

/* Find node that has the address range that contains global address */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
	int i, num = uv_hub_info->gr_table_len;

	if (gr) {
		for (i = 0; i < num; i++, gr++) {
			if (pal < gr->limit)
				return gr;
		}
	}
	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
	BUG();
}

/* Return base address of node that contains global address */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_gam_range(pa);
	int base = gr->base;

	if (base < 0)
		return 0UL;

	return uv_hub_info->gr_table[base].limit;
}

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
	return uv_gam_range(paddr)->nasid;
}
#define	_uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
	return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	unsigned int m_val = uv_hub_info->m_val;

	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;

	if (m_val) {
		paddr |= uv_hub_info->gnode_upper;
		paddr = ((paddr << uv_hub_info->m_shift)
						>> uv_hub_info->m_shift) |
			((paddr >> uv_hub_info->m_val)
						<< uv_hub_info->n_lshift);
	} else {
		paddr |= uv_soc_phys_ram_to_nasid(paddr)
						<< uv_hub_info->gpa_shift;
	}
	return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}
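
/*
 * Sketch of the conversion above (bit positions are configuration dependent;
 * m_val, m_shift and n_lshift are set at boot):
 *
 *	When m_val is non-zero, the node offset bits selected by m_shift stay
 *	in place ((paddr << m_shift) >> m_shift), while the node (gnode) bits
 *	above them are extracted with (paddr >> m_val) and re-inserted at bit
 *	n_lshift (e.g. bit 39 or 40 on UV2, per the comment at the top of
 *	this file).  When m_val is 0 (the UV4+ GAM path), the NASID from the
 *	GAM range table is placed at gpa_shift instead.
 */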

/* Top two bits indicate the requested address is in MMR space.  */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
	unsigned int m_val = uv_hub_info->m_val;

	if (m_val)
		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}

/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	unsigned int n_lshift = uv_hub_info->n_lshift;

	if (n_lshift)
		return gpa >> n_lshift;

	return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	unsigned int m_shift = uv_hub_info->m_shift;

	if (m_shift)
		return (gpa << m_shift) >> m_shift;

	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	unsigned int m_val = uv_hub_info->m_val;
	unsigned long base;
	unsigned short sockid, node, *p2s;

	if (m_val)
		return __va(((unsigned long)pnode << m_val) | offset);

	p2s = uv_hub_info->pnode_to_socket;
	sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
	node = uv_socket_to_node(sockid);

	/* limit address of previous socket is our base, except node 0 is 0 */
	if (!node)
		return __va((unsigned long)offset);

	base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
	return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
	unsigned short *s2pn = uv_hub_info->socket_to_pnode;

	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/* Convert an apicid to the socket number on the blade */
static inline int uv_apicid_to_socket(int apicid)
{
	if (is_uv1_hub())
		return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
	else
		return 0;
}
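
/*
 * For example (illustrative only; apic_pnode_shift and the socket_to_pnode
 * table are established at boot): with apic_pnode_shift == 6 and no
 * socket_to_pnode table, APICID 0x105 yields pnode 0x105 >> 6 == 4.
 */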

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		    UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
	return uv_cpu_info_per(cpu)->blade_cpu_id;
}
#define	_uv_cpu_blade_processor_id 1	/* indicate function available */

/* Blade number to Node number (UV1..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
	return blade;
}

/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}
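
/*
 * Illustration of how the global MMR64 address is composed (purely
 * illustrative; global_mmr_base and global_mmr_shift come from the hub
 * configuration): with UV_GLOBAL_MMR64_PNODE_SHIFT == 26, a read of
 * offset 0x70000 on pnode 2 accesses the virtual address
 *
 *	__va(global_mmr_base | (2UL << 26) | 0x70000)
 */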

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV1 thru UV4 the node and the blade are identical.
 * .. If this changes then you MUST check references to this function!
 */
static inline int uv_node_to_blade_id(int nid)
{
	return nid;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_node_to_blade_id(cpu_to_node(cpu));
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_hub_info_list(nid)->pnode;
}

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}

/* Per Hub NMI support */
extern void uv_nmi_setup(void);
extern void uv_nmi_setup_hubless(void);

/* BIOS/Kernel flags exchange MMR */
#define UVH_BIOS_KERNEL_MMR		UVH_SCRATCH5
#define UVH_BIOS_KERNEL_MMR_ALIAS	UVH_SCRATCH5_ALIAS
#define UVH_BIOS_KERNEL_MMR_ALIAS_2	UVH_SCRATCH5_ALIAS_2

/* TSC sync valid, set by BIOS */
#define UVH_TSC_SYNC_MMR	UVH_BIOS_KERNEL_MMR
#define UVH_TSC_SYNC_SHIFT	10
#define UVH_TSC_SYNC_SHIFT_UV2K	16	/* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK	3	/* 0011 */
#define UVH_TSC_SYNC_VALID	3	/* 0011 */
#define UVH_TSC_SYNC_INVALID	2	/* 0010 */
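
/*
 * A sketch of how these bits might be decoded (illustrative only; UV2/3k
 * parts use UVH_TSC_SYNC_SHIFT_UV2K instead of UVH_TSC_SYNC_SHIFT):
 *
 *	unsigned long mmr = uv_read_local_mmr(UVH_TSC_SYNC_MMR);
 *	bool valid = ((mmr >> UVH_TSC_SYNC_SHIFT) & UVH_TSC_SYNC_MASK) ==
 *			UVH_TSC_SYNC_VALID;
 */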

/* BMC sets a bit in this MMR before sending an NMI */
#define UVH_NMI_MMR		UVH_BIOS_KERNEL_MMR
#define UVH_NMI_MMR_CLEAR	UVH_BIOS_KERNEL_MMR_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"

/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX		UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR	UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT	UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
#define UVH_NMI_MMRX_TYPE	"EXTIO_INT0"

/* Non-zero indicates newer SMM NMI handler present */
#define UVH_NMI_MMRX_SUPPORTED	UVH_EXTIO_INT0_BROADCAST

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
#define UVH_NMI_MMRX_REQ	UVH_BIOS_KERNEL_MMR_ALIAS_2
#define UVH_NMI_MMRX_REQ_SHIFT	62

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
	bool		hub_present;	/* false means UV hubless system */
	bool		pch_owner;	/* indicates this hub owns PCH */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	int			state;
	int			pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

#define uv_hub_nmi			this_cpu_read(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define	UV_NMI_STATE_OUT		0
#define	UV_NMI_STATE_IN			1
#define	UV_NMI_STATE_DUMP		2
#define	UV_NMI_STATE_DUMP_DONE		3

/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
	if (uv_scir_info->state != value) {
		uv_scir_info->state = value;
		uv_write_local_mmr8(uv_scir_info->offset, value);
	}
}

static inline unsigned long uv_scir_offset(int apicid)
{
	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
}

static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
	if (uv_cpu_scir_info(cpu)->state != value) {
		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
				uv_cpu_scir_info(cpu)->offset, value);
		uv_cpu_scir_info(cpu)->state = value;
	}
}

extern unsigned int uv_apicid_hibits;
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
	apicid |= uv_apicid_hibits;
	return (1UL << UVH_IPI_INT_SEND_SHFT) |
			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
			(vector << UVH_IPI_INT_VECTOR_SHFT);
}

static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
	unsigned long val;
	unsigned long dmode = dest_Fixed;

	if (vector == NMI_VECTOR)
		dmode = dest_NMI;

	val = uv_hub_ipi_value(apicid, vector, dmode);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/*
 * Get the minimum revision number of the hub chips within the partition.
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */