/*
 * Virtio driver bits
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-ccw.h"
#include "virtio.h"

static struct vring block;

static char chsc_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2)
{
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register long retval asm("2");

    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2)
                  : "memory", "cc");

    return retval;
}

static void virtio_notify(struct subchannel_id schid)
{
    kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid, 0);
}

/***********************************************
 *             Virtio functions                *
 ***********************************************/

static int drain_irqs(struct subchannel_id schid)
{
    struct irb irb = {};
    int r = 0;

    while (1) {
        /* FIXME: make use of TPI, for that enable subchannel and isc */
        if (tsch(schid, &irb)) {
            /* Might want to differentiate error codes later on. */
            if (irb.scsw.cstat) {
                r = -EIO;
            } else if (irb.scsw.dstat != 0xc) {
                r = -EIO;
            }
            return r;
        }
    }
}

static int run_ccw(struct subchannel_id schid, int cmd, void *ptr, int len)
{
    struct ccw1 ccw = {};
    struct cmd_orb orb = {};
    struct schib schib;
    int r;

    /* start command processing */
    stsch_err(schid, &schib);
    schib.scsw.ctrl = SCSW_FCTL_START_FUNC;
    msch(schid, &schib);

    /* start subchannel command */
    orb.fmt = 1;
    orb.cpa = (u32)(long)&ccw;
    orb.lpm = 0x80;

    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(schid, &orb);
    /*
     * XXX Wait until device is done processing the CCW. For now we can
     *     assume that a simple tsch will have finished the CCW processing,
     *     but the architecture allows for asynchronous operation
     */
    if (!r) {
        r = drain_irqs(schid);
    }
    return r;
}
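
/*
 * Usage note (illustrative sketch, not a call made at this point in the
 * file): every virtio-ccw operation below is a single channel program
 * driven by run_ccw(). For example, reading the device configuration
 * looks like
 *
 *     VirtioBlkConfig cfg;
 *     if (run_ccw(schid, CCW_CMD_READ_CONF, &cfg, sizeof(cfg))) {
 *         virtio_panic("Could not read device configuration\n");
 *     }
 *
 * run_ccw() starts the CCW with ssch() and then spins in drain_irqs()
 * until the subchannel reports completion.
 */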

static void virtio_set_status(struct subchannel_id schid,
                              unsigned long dev_addr)
{
    unsigned char status = dev_addr;
    if (run_ccw(schid, CCW_CMD_WRITE_STATUS, &status, sizeof(status))) {
        virtio_panic("Could not write status to host!\n");
    }
}

static void virtio_reset(struct subchannel_id schid)
{
    run_ccw(schid, CCW_CMD_VDEV_RESET, NULL, 0);
}

static void vring_init(struct vring *vr, unsigned int num, void *p,
                       unsigned long align)
{
    debug_print_addr("init p", p);
    vr->num = num;
    vr->desc = p;
    vr->avail = p + num * sizeof(struct vring_desc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align - 1)
               & ~(align - 1));

    /* Zero out all relevant fields */
    vr->avail->flags = 0;
    vr->avail->idx = 0;

    /* We're running with interrupts off anyway, so don't bother */
    vr->used->flags = VRING_USED_F_NO_NOTIFY;
    vr->used->idx = 0;
    vr->used_idx = 0;
    vr->next_idx = 0;

    debug_print_addr("init vr", vr);
}

static void vring_notify(struct subchannel_id schid)
{
    virtio_notify(schid);
}

static void vring_send_buf(struct vring *vr, void *p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
    }

    vr->desc[vr->next_idx].addr = (ulong)p;
    vr->desc[vr->next_idx].len = len;
    vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
    vr->desc[vr->next_idx].next = vr->next_idx;
    vr->desc[vr->next_idx].next++;
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx++;
    }
}

static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}

static ulong get_second(void)
{
    return (get_clock() >> 12) / 1000000;
}

/*
 * Wait for the host to reply.
 *
 * timeout is in seconds if > 0.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(struct vring *vr, int timeout)
{
    ulong target_second = get_second() + timeout;
    struct subchannel_id schid = vr->schid;
    int r = 0;

    /* Wait until the used index has moved. */
    while (vr->used->idx == vr->used_idx) {
        vring_notify(schid);
        if (timeout && (get_second() >= target_second)) {
            r = 1;
            break;
        }
        yield();
    }

    vr->used_idx = vr->used->idx;
    vr->next_idx = 0;
    vr->desc[0].len = 0;
    vr->desc[0].flags = 0;

    return r;
}
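
/*
 * Request flow (illustrative sketch): a block request is queued as a
 * three-buffer descriptor chain and then completed synchronously, exactly
 * as virtio_read_many() below does:
 *
 *     vring_send_buf(vr, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);
 *     vring_send_buf(vr, data, len, VRING_DESC_F_WRITE |
 *                    VRING_HIDDEN_IS_CHAIN | VRING_DESC_F_NEXT);
 *     vring_send_buf(vr, &status, 1, VRING_DESC_F_WRITE |
 *                    VRING_HIDDEN_IS_CHAIN);
 *     vring_wait_reply(vr, 0);
 *
 * VRING_HIDDEN_IS_CHAIN marks follow-up buffers of an already started
 * chain, so the avail ring only receives one entry per request.
 */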

/***********************************************
 *               Virtio block                  *
 ***********************************************/

int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    struct virtio_blk_outhdr out_hdr;
    u8 status;
    int r;

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = virtio_sector_adjust(sector);

    vring_send_buf(&block, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(&block, load_addr, virtio_get_block_size() * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* status field */
    vring_send_buf(&block, &status, sizeof(u8), VRING_DESC_F_WRITE |
                   VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply(&block, 0);

    r = drain_irqs(block.schid);
    if (r) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }
    return status;
}

unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
                                 ulong subchan_id, void *load_addr)
{
    u8 status;
    int sec = rec_list1;
    int sec_num = ((rec_list2 >> 32) & 0xffff) + 1;
    int sec_len = rec_list2 >> 48;
    ulong addr = (ulong)load_addr;

    if (sec_len != virtio_get_block_size()) {
        return -1;
    }

    sclp_print(".");
    status = virtio_read_many(sec, (void *)addr, sec_num);
    if (status) {
        virtio_panic("I/O Error");
    }
    addr += sec_num * virtio_get_block_size();

    return addr;
}

int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}

static VirtioBlkConfig blk_cfg = {};
static bool guessed_disk_nature;

bool virtio_guessed_disk_nature(void)
{
    return guessed_disk_nature;
}

void virtio_assume_scsi(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 512;
    blk_cfg.physical_block_exp = 0;
}

void virtio_assume_iso9660(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 2048;
    blk_cfg.physical_block_exp = 0;
}

void virtio_assume_eckd(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 4096;
    blk_cfg.physical_block_exp = 0;

    /* this must be here to calculate code segment position */
    blk_cfg.geometry.heads = 15;
    blk_cfg.geometry.sectors = 12;
}

bool virtio_disk_is_scsi(void)
{
    if (guessed_disk_nature) {
        return (virtio_get_block_size() == 512);
    }
    return (blk_cfg.geometry.heads == 255)
        && (blk_cfg.geometry.sectors == 63)
        && (virtio_get_block_size() == 512);
}

/*
 * Other supported value pairs, if any, would need to be added here.
 * Note: head count is always 15.
 */
static inline u8 virtio_eckd_sectors_for_block_size(int size)
{
    switch (size) {
    case 512:
        return 49;
    case 1024:
        return 33;
    case 2048:
        return 21;
    case 4096:
        return 12;
    }
    return 0;
}

bool virtio_disk_is_eckd(void)
{
    const int block_size = virtio_get_block_size();

    if (guessed_disk_nature) {
        return (block_size == 4096);
    }
    return (blk_cfg.geometry.heads == 15)
        && (blk_cfg.geometry.sectors ==
            virtio_eckd_sectors_for_block_size(block_size));
}

bool virtio_ipl_disk_is_valid(void)
{
    return virtio_disk_is_scsi() || virtio_disk_is_eckd();
}

int virtio_get_block_size(void)
{
    return blk_cfg.blk_size << blk_cfg.physical_block_exp;
}

uint8_t virtio_get_heads(void)
{
    return blk_cfg.geometry.heads;
}

uint8_t virtio_get_sectors(void)
{
    return blk_cfg.geometry.sectors;
}

uint64_t virtio_get_blocks(void)
{
    return blk_cfg.capacity /
           (virtio_get_block_size() / VIRTIO_SECTOR_SIZE);
}
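
/*
 * Note (informational): blk_cfg.capacity counts 512-byte units
 * (VIRTIO_SECTOR_SIZE), so for a 4096-byte logical block size the
 * division above works out to capacity / 8 blocks.
 */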

void virtio_setup_block(struct subchannel_id schid)
{
    struct vq_info_block info;
    struct vq_config_block config = {};

    blk_cfg.blk_size = 0; /* mark "illegal" - setup started... */
    guessed_disk_nature = false;

    virtio_reset(schid);

    /*
     * Skipping CCW_CMD_READ_FEAT. We're not doing anything fancy, and
     * we'll just stop dead anyway if anything does not work as we
     * expect it.
     */

    config.index = 0;
    if (run_ccw(schid, CCW_CMD_READ_VQ_CONF, &config, sizeof(config))) {
        virtio_panic("Could not get block device VQ configuration\n");
    }
    if (run_ccw(schid, CCW_CMD_READ_CONF, &blk_cfg, sizeof(blk_cfg))) {
        virtio_panic("Could not get block device configuration\n");
    }
    vring_init(&block, config.num, ring_area,
               KVM_S390_VIRTIO_RING_ALIGN);

    info.queue = (unsigned long long) ring_area;
    info.align = KVM_S390_VIRTIO_RING_ALIGN;
    info.index = 0;
    info.num = config.num;
    block.schid = schid;

    if (!run_ccw(schid, CCW_CMD_SET_VQ, &info, sizeof(info))) {
        virtio_set_status(schid, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (!virtio_ipl_disk_is_valid()) {
        /* make sure all getters but blocksize return 0 for invalid IPL disk */
        memset(&blk_cfg, 0, sizeof(blk_cfg));
        virtio_assume_scsi();
    }
}

bool virtio_is_blk(struct subchannel_id schid)
{
    int r;
    struct senseid senseid = {};

    /* run sense id command */
    r = run_ccw(schid, CCW_CMD_SENSE_ID, &senseid, sizeof(senseid));
    if (r) {
        return false;
    }
    if ((senseid.cu_type != 0x3832) || (senseid.cu_model != VIRTIO_ID_BLOCK)) {
        return false;
    }

    return true;
}

int enable_mss_facility(void)
{
    int ret;
    struct chsc_area_sda *sda_area = (struct chsc_area_sda *) chsc_page;

    memset(sda_area, 0, PAGE_SIZE);
    sda_area->request.length = 0x0400;
    sda_area->request.code = 0x0031;
    sda_area->operation_code = 0x2;

    ret = chsc(sda_area);
    if ((ret == 0) && (sda_area->response.code == 0x0001)) {
        return 0;
    }
    return -EIO;
}
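
/*
 * Typical call sequence (illustrative sketch; the actual caller lives in
 * the boot code outside this file):
 *
 *     enable_mss_facility();
 *     if (virtio_is_blk(schid)) {
 *         virtio_setup_block(schid);
 *         virtio_read(start_sector, load_addr);
 *     }
 */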