/*
 * SCLP early driver
 *
 * Copyright IBM Corp. 2013
 */

#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_pad_11[16 - 11];	/* 11-15 */
	u16	ncpurl;			/* 16-17 */
	u16	cpuoff;			/* 18-19 */
	u8	_pad_20[24 - 20];	/* 20-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_pad_32[42 - 32];	/* 32-41 */
	u8	fac42;			/* 42 */
	u8	fac43;			/* 43 */
	u8	_pad_44[48 - 44];	/* 44-47 */
	u64	facilities;		/* 48-55 */
	u8	_pad_56[66 - 56];	/* 56-65 */
	u8	fac66;			/* 66 */
	u8	_pad_67[76 - 67];	/* 67-75 */
	u32	ibc;			/* 76-79 */
	u8	_pad80[84 - 80];	/* 80-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_pad_86[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_pad_92[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_pad_112[120 - 112];	/* 112-119 */
	u16	hcpua;			/* 120-121 */
	u8	_pad_122[4096 - 122];	/* 122-4095 */
} __packed __aligned(PAGE_SIZE);

static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
static unsigned char sclp_siif;
static unsigned char sclp_sigpif;
static u32 sclp_ibc;
static unsigned int sclp_mtid;
static unsigned int sclp_mtid_cp;
static unsigned int sclp_mtid_max;
static unsigned int sclp_mtid_prev;

u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
unsigned long long sclp_rnmax;

/*
 * Issue an SCLP command and wait for completion: enable the service-signal
 * subclass mask in control register 0, start the service call and, if it
 * was accepted, wait in an enabled wait PSW until the service-signal
 * external interrupt indicates that the SCCB has been updated.
 */
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
	int rc, i;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10)
			return 0;
		if (sccb->header.response_code != 0x1f0)
			break;
	}
	return -EIO;
}
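/*
 * Read the SCP information SCCB once during early boot and cache the values
 * that later boot code asks for: the facility bits, the storage increment
 * size (rzm) and maximum increment number (rnmax), the maximum CPU count,
 * the SIIF/SIGPIF capabilities of the boot CPU, the IPL information and the
 * multi-threading (sclp_mtid*) values.
 */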
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
	struct sclp_cpu_entry *cpue;
	u16 boot_cpu_address, cpu;

	if (sclp_read_info_early(sccb))
		return;

	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp_rzm <<= 20;
	sclp_ibc = sccb->ibc;

	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
			sclp_max_cpu = 64;
		else
			sclp_max_cpu = sccb->ncpurl;
	} else {
		sclp_max_cpu = sccb->hcpua + 1;
	}

	boot_cpu_address = stap();
	cpue = (void *)sccb + sccb->cpuoff;
	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
		if (boot_cpu_address != cpue->core_id)
			continue;
		sclp_siif = cpue->siif;
		sclp_sigpif = cpue->sigpif;
		break;
	}

	/* Save IPL information */
	sclp_ipl_info.is_valid = 1;
	if (sccb->flags & 0x2)
		sclp_ipl_info.has_dump = 1;
	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);

	sclp_mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
	sclp_mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
	sclp_mtid_max = max(sclp_mtid, sclp_mtid_cp);
	sclp_mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
}

bool __init sclp_has_linemode(void)
{
	return !!sclp_con_has_linemode;
}

bool __init sclp_has_vt220(void)
{
	return !!sclp_con_has_vt220;
}

unsigned long long sclp_get_rnmax(void)
{
	return sclp_rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return sclp_rzm;
}

unsigned int sclp_get_max_cpu(void)
{
	return sclp_max_cpu;
}

int sclp_has_siif(void)
{
	return sclp_siif;
}
EXPORT_SYMBOL(sclp_has_siif);

int sclp_has_sigpif(void)
{
	return sclp_sigpif;
}
EXPORT_SYMBOL(sclp_has_sigpif);

unsigned int sclp_get_ibc(void)
{
	return sclp_ibc;
}
EXPORT_SYMBOL(sclp_get_ibc);

unsigned int sclp_get_mtid(u8 cpu_type)
{
	return cpu_type ? sclp_mtid : sclp_mtid_cp;
}

unsigned int sclp_get_mtid_max(void)
{
	return sclp_mtid_max;
}

unsigned int sclp_get_mtid_prev(void)
{
	return sclp_mtid_prev;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. The sclp_facilities_detect() function retrieves
 * and saves the IPL information.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	*info = sclp_ipl_info;
}
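/*
 * The helpers below determine the size of the HSA (hardware system area)
 * that an FCP dump (zfcpdump) kernel can read through the SDIAS ("store
 * data in absolute storage") event: an SDIAS size query is sent and the
 * returned block count is converted to bytes. On LPAR the result comes
 * back synchronously in the SCCB; under z/VM the request completes
 * asynchronously and sclp_hsa_copy_wait() fetches the result with a read
 * event data command.
 */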
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	do {
		rc = sclp_cmd_sync_early(cmd, sccb);
	} while (rc == -EBUSY);

	if (rc)
		return -EIO;
	if (((struct sccb_header *) sccb)->response_code != 0x0020)
		return -EIO;
	return 0;
}

static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
	memset(sccb, 0, sizeof(*sccb));

	sccb->hdr.length = sizeof(*sccb);
	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb->evbuf.hdr.type = EVTYP_SDIAS;
	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
	sccb->evbuf.event_id = 4712;
	sccb->evbuf.dbs = 1;
}

static int __init sclp_set_event_mask(struct init_sccb *sccb,
				      unsigned long receive_mask,
				      unsigned long send_mask)
{
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}

static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
	sccb_init_eq_size(sccb);
	if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
		return -EIO;
	if (sccb->evbuf.blk_cnt == 0)
		return 0;
	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
	memset(sccb, 0, PAGE_SIZE);
	sccb->length = PAGE_SIZE;
	if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
		return -EIO;
	if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
		return 0;
	return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

unsigned long sclp_get_hsa_size(void)
{
	return sclp_hsa_size;
}

static void __init sclp_hsa_size_detect(void *sccb)
{
	long size;

	/* First try synchronous interface (LPAR) */
	if (sclp_set_event_mask(sccb, 0, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	if (size != 0)
		goto out;
	/* Then try asynchronous interface (z/VM) */
	if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	size = sclp_hsa_copy_wait(sccb);
	if (size < 0)
		return;
out:
	sclp_hsa_size = size;
}

static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
	if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
		return 0;
	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
		return 0;
	return 1;
}

static void __init sclp_console_detect(struct init_sccb *sccb)
{
	if (sccb->header.response_code != 0x20)
		return;

	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
		sclp_con_has_vt220 = 1;

	if (sclp_con_check_linemode(sccb))
		sclp_con_has_linemode = 1;
}

void __init sclp_early_detect(void)
{
	void *sccb = &sccb_early;

	sclp_facilities_detect(sccb);
	sclp_hsa_size_detect(sccb);

	/* Turn off SCLP event notifications.  Also save remote masks in the
	 * sccb.  These are sufficient to detect sclp console capabilities.
	 */
	sclp_set_event_mask(sccb, 0, 0);
	sclp_console_detect(sccb);
}