/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef __ASM_IA64_UV_HUB_H__
#define __ASM_IA64_UV_HUB_H__

#include <linux/numa.h>
#include <linux/percpu.h>
#include <asm/types.h>
#include <asm/percpu.h>


/*
 * Addressing Terminology
 *
 *	M	- The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is
 *		  physically contiguous (although various IO spaces may punch
 *		  holes in it).
 *
 *	N	- Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID	- network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have the low bit set to 1; C/M bricks have
 *		  the low bit equal to 0. Most addressing macros that target
 *		  UV hub chips right shift the NASID by 1 to exclude the
 *		  always-zero bit. NASIDs contain up to 15 bits.
 *
 *	GNODE	- NASID right shifted by 1 bit. Most MMRs contain gnodes
 *		  instead of nasids.
 *
 *	PNODE	- the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *
 *  NumaLink Global Physical Address Format:
 *
 *  +-------+------------------------+----------------------+
 *  |00..000|          GNODE         |      NodeOffset      |
 *  +-------+------------------------+----------------------+
 *          |<----- 53 - M bits ---->|<------ M bits ------>|
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   |<-- N bits --->|<----- M bits ------>|
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *	Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *	The actual values are configuration dependent; M and N are set
 *	by the hardware/BIOS at boot time.
 */
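
/*
 * Illustrative sketch of the terminology above.  This block is never
 * compiled (#if 0); the helper names are made up for the example and the
 * real M/N values are only known at boot.
 */
#if 0
/* NASID -> GNODE: drop the always-zero low bit of a C/M brick nasid. */
static unsigned long example_nasid_to_gnode(unsigned long nasid)
{
	return nasid >> 1;
}

/* PNODE: keep only the low N bits of the GNODE. */
static unsigned long example_gnode_to_pnode(unsigned long gnode, int n_val)
{
	return gnode & ((1UL << n_val) - 1);
}

/* GNODE + node offset -> NumaLink global physical address. */
static unsigned long example_make_gpa(unsigned long gnode,
				      unsigned long offset, int m_val)
{
	return (gnode << m_val) | offset;
}
#endif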

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	1

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in the per-cpu data areas of each cpu.
 * They are kept together in a struct to minimize cache misses.
 */
struct uv_hub_info_s {
	unsigned long	global_mmr_base;
	unsigned long	gpa_mask;
	unsigned long	gnode_upper;
	unsigned long	lowmem_remap_top;
	unsigned long	lowmem_remap_base;
	unsigned short	pnode;
	unsigned short	pnode_mask;
	unsigned short	coherency_domain_number;
	unsigned short	numa_blade_id;
	unsigned char	blade_processor_id;
	unsigned char	m_val;
	unsigned char	n_val;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)	(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_NASID(p)	(((p) << 1) | uv_hub_info->gnode_upper)

#define UV_LOCAL_MMR_BASE	0xf4000000UL
#define UV_GLOBAL_MMR32_BASE	0xf8000000UL
#define UV_GLOBAL_MMR64_BASE	(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define UV_GLOBAL_MMR64_PNODE_SHIFT	26

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)

/*
 * Functions for converting between kernel virtual addresses, socket-local
 * physical addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	between socket virtual and socket physical addresses.
 */

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr += uv_hub_info->lowmem_remap_base;
	return paddr | uv_hub_info->gnode_upper;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return __pa(v) | uv_hub_info->gnode_upper;
}

/* socket virtual --> UV global physical address, returned as a pointer */
static inline void *uv_vgpa(void *v)
{
	return (void *)uv_gpa(v);
}

/* UV global physical address --> socket virtual */
static inline void *uv_va(unsigned long gpa)
{
	return __va(gpa & uv_hub_info->gpa_mask);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
}
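
/*
 * Usage sketch for the conversion helpers above.  This block is never
 * compiled (#if 0); "buf" stands for a hypothetical kernel-virtual buffer
 * located in RAM on the local blade.
 */
#if 0
static int example_gpa_round_trip(void *buf)
{
	unsigned long gpa = uv_gpa(buf);	/* socket virtual -> UV global physical */
	void *va = uv_va(gpa);			/* UV global physical -> socket virtual */

	return va == buf;			/* holds for memory on the local blade */
}
#endif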

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode,
						     unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		    UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
					 unsigned long val)
{
	*uv_global_mmr32_address(pnode, offset) = val;
}

static inline unsigned long uv_read_global_mmr32(int pnode,
						 unsigned long offset)
{
	return *uv_global_mmr32_address(pnode, offset);
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline unsigned long *uv_global_mmr64_address(int pnode,
						     unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
					 unsigned long val)
{
	*uv_global_mmr64_address(pnode, offset) = val;
}

static inline unsigned long uv_read_global_mmr64(int pnode,
						 unsigned long offset)
{
	return *uv_global_mmr64_address(pnode, offset);
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return *uv_local_mmr_address(offset);
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	*uv_local_mmr_address(offset) = val;
}

/*
 * Structures and definitions for converting between cpu, node, pnode, and
 * blade numbers.
 */

/* Blade-local cpu number of the current cpu. Numbered 0 .. <#cpus on the blade - 1> */
static inline int uv_blade_processor_id(void)
{
	return smp_processor_id();
}

/* Blade number of the current cpu. Numbered 0 .. <#blades - 1> */
static inline int uv_numa_blade_id(void)
{
	return 0;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return 0;
}

/* Convert a linux node number to the UV blade number */
static inline int uv_node_to_blade_id(int nid)
{
	return 0;
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return 0;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return num_possible_cpus();
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return num_online_cpus();
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return 0;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return 0;
}

/* Maximum possible number of blades */
static inline int uv_num_possible_blades(void)
{
	return 1;
}

static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
	/* not currently needed on ia64 */
}

#endif /* __ASM_IA64_UV_HUB_H__ */
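
/*
 * Usage sketch for the MMR accessors and blade helpers above.  This block
 * is never compiled (#if 0); the MMR offset used here is hypothetical and
 * does not correspond to any real UV register.
 */
#if 0
static void example_mmr_access(void)
{
	int pnode = uv_cpu_to_pnode(smp_processor_id());
	unsigned long offset = 0x1000UL;		/* hypothetical MMR offset */
	unsigned long val;

	val = uv_read_global_mmr64(pnode, offset);	/* read via the global MMR64 space */
	uv_write_local_mmr(offset, val);		/* write the same offset via the hub-local space */
}
#endif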