/*
 * SCLP early driver
 *
 * Copyright IBM Corp. 2013
 */

#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_pad_11[16 - 11];	/* 11-15 */
	u16	ncpurl;			/* 16-17 */
	u16	cpuoff;			/* 18-19 */
	u8	_pad_20[24 - 20];	/* 20-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_pad_32[42 - 32];	/* 32-41 */
	u8	fac42;			/* 42 */
	u8	fac43;			/* 43 */
	u8	_pad_44[48 - 44];	/* 44-47 */
	u64	facilities;		/* 48-55 */
	u8	_pad_56[66 - 56];	/* 56-65 */
	u8	fac66;			/* 66 */
	u8	_pad_67[76 - 67];	/* 67-75 */
	u32	ibc;			/* 76-79 */
	u8	_pad80[84 - 80];	/* 80-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_pad_86[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_pad_92[99 - 92];	/* 92-98 */
	u8	hamaxpow;		/* 99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_pad_112[116 - 112];	/* 112-115 */
	u8	fac116;			/* 116 */
	u8	_pad_117[119 - 117];	/* 117-118 */
	u8	fac119;			/* 119 */
	u16	hcpua;			/* 120-121 */
	u8	_pad_122[4096 - 122];	/* 122-4095 */
} __packed __aligned(PAGE_SIZE);

static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static struct sclp_ipl_info sclp_ipl_info;

struct sclp_info sclp;
EXPORT_SYMBOL(sclp);

static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	/* Enable the service-signal external interruption subclass (CR0 bit 9) */
	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	/* Wait in an enabled wait PSW until the service-signal interrupt arrives */
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
	int rc, i;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	/* Try READ SCP INFO FORCED first, then fall back to READ SCP INFO */
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10)
			return 0;
		if (sccb->header.response_code != 0x1f0)
			break;
	}
	return -EIO;
}

static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
	struct sclp_core_entry *cpue;
	u16 boot_cpu_address, cpu;

	if (sclp_read_info_early(sccb))
		return;

	sclp.facilities = sccb->facilities;
	sclp.has_sprp = !!(sccb->fac84 & 0x02);
	sclp.has_core_type = !!(sccb->fac84 & 0x01);
	sclp.has_esca = !!(sccb->fac116 & 0x08);
	sclp.has_hvs = !!(sccb->fac119 & 0x80);
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp.rzm <<= 20;
	sclp.ibc = sccb->ibc;

	if (sccb->hamaxpow && sccb->hamaxpow < 64)
		sclp.hamax = (1UL << sccb->hamaxpow) - 1;
	else
		sclp.hamax = U64_MAX;

	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
			sclp.max_cores = 64;
		else
			sclp.max_cores = sccb->ncpurl;
	} else {
		sclp.max_cores = sccb->hcpua + 1;
	}

	/* Find the entry of the boot CPU in the CPU list */
	boot_cpu_address = stap();
	cpue = (void *)sccb + sccb->cpuoff;
	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
		if (boot_cpu_address != cpue->core_id)
			continue;
		sclp.has_siif = cpue->siif;
		sclp.has_sigpif = cpue->sigpif;
		sclp.has_sief2 = cpue->sief2;
		break;
	}

	/* Save IPL information */
	sclp_ipl_info.is_valid = 1;
	if (sccb->flags & 0x2)
		sclp_ipl_info.has_dump = 1;
	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);

	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. The sclp_facilities_detect() function retrieves
 * and saves the IPL information.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	*info = sclp_ipl_info;
}

static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	do {
		rc = sclp_cmd_sync_early(cmd, sccb);
	} while (rc == -EBUSY);

	if (rc)
		return -EIO;
	if (((struct sccb_header *) sccb)->response_code != 0x0020)
		return -EIO;
	return 0;
}

static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
	memset(sccb, 0, sizeof(*sccb));

	sccb->hdr.length = sizeof(*sccb);
	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb->evbuf.hdr.type = EVTYP_SDIAS;
	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
	sccb->evbuf.event_id = 4712;
	sccb->evbuf.dbs = 1;
}

static int __init sclp_set_event_mask(struct init_sccb *sccb,
				      unsigned long receive_mask,
				      unsigned long send_mask)
{
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}

static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
	sccb_init_eq_size(sccb);
	if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
		return -EIO;
	if (sccb->evbuf.blk_cnt == 0)
		return 0;
	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
	memset(sccb, 0, PAGE_SIZE);
	sccb->length = PAGE_SIZE;
	if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
		return -EIO;
	if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
		return 0;
	return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

static void __init sclp_hsa_size_detect(void *sccb)
{
	long size;

	/* First try synchronous interface (LPAR) */
	if (sclp_set_event_mask(sccb, 0, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	if (size != 0)
		goto out;
	/* Then try asynchronous interface (z/VM) */
	if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	size = sclp_hsa_copy_wait(sccb);
	if (size < 0)
		return;
out:
	sclp.hsa_size = size;
}

static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
	if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
		return 0;
	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
		return 0;
	return 1;
}

static void __init sclp_console_detect(struct init_sccb *sccb)
{
	if (sccb->header.response_code != 0x20)
		return;

	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
		sclp.has_vt220 = 1;

	if (sclp_con_check_linemode(sccb))
		sclp.has_linemode = 1;
}

void __init sclp_early_detect(void)
{
	void *sccb = &sccb_early;

	sclp_facilities_detect(sccb);
	sclp_hsa_size_detect(sccb);

	/* Turn off SCLP event notifications.  Also save remote masks in the
	 * sccb.  These are sufficient to detect sclp console capabilities.
	 */
	sclp_set_event_mask(sccb, 0, 0);
	sclp_console_detect(sccb);
}