/*
 * Virtio driver bits
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-ccw.h"
#include "virtio.h"

struct vring block;

static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2)
{
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register long retval asm("2");

    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2)
                  : "memory", "cc");

    return retval;
}

static void virtio_notify(struct subchannel_id schid)
{
    kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid, 0);
}

/***********************************************
 *             Virtio functions                *
 ***********************************************/

static int drain_irqs(struct subchannel_id schid)
{
    struct irb irb = {};
    int r = 0;

    while (1) {
        /* FIXME: make use of TPI, for that enable subchannel and isc */
        if (tsch(schid, &irb)) {
            /* Might want to differentiate error codes later on. */
            if (irb.scsw.cstat) {
                r = -EIO;
            } else if (irb.scsw.dstat != 0xc) {
                r = -EIO;
            }
            return r;
        }
    }
}

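/*
 * Issue a single channel command against the given subchannel: mark the
 * start function in the SCHIB, point the ORB at a one-entry channel
 * program, kick it off with SSCH, then poll with TSCH (via drain_irqs)
 * until the device presents its final status.
 */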
static int run_ccw(struct subchannel_id schid, int cmd, void *ptr, int len)
{
    struct ccw1 ccw = {};
    struct cmd_orb orb = {};
    struct schib schib;
    int r;

    /* start command processing */
    stsch_err(schid, &schib);
    schib.scsw.ctrl = SCSW_FCTL_START_FUNC;
    msch(schid, &schib);

    /* start subchannel command */
    orb.fmt = 1;
    orb.cpa = (u32)(long)&ccw;
    orb.lpm = 0x80;

    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(schid, &orb);
    /*
     * XXX Wait until device is done processing the CCW. For now we can
     *     assume that a simple tsch will have finished the CCW processing,
     *     but the architecture allows for asynchronous operation
     */
    if (!r) {
        r = drain_irqs(schid);
    }
    return r;
}

static void virtio_set_status(struct subchannel_id schid,
                              unsigned long dev_addr)
{
    unsigned char status = dev_addr;
    if (run_ccw(schid, CCW_CMD_WRITE_STATUS, &status, sizeof(status))) {
        virtio_panic("Could not write status to host!\n");
    }
}

static void virtio_reset(struct subchannel_id schid)
{
    run_ccw(schid, CCW_CMD_VDEV_RESET, NULL, 0);
}

static void vring_init(struct vring *vr, unsigned int num, void *p,
                       unsigned long align)
{
    debug_print_addr("init p", p);
    vr->num = num;
    vr->desc = p;
    vr->avail = p + num * sizeof(struct vring_desc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align - 1)
                        & ~(align - 1));

    /* We're running with interrupts off anyways, so don't bother */
    vr->used->flags = VRING_USED_F_NO_NOTIFY;

    debug_print_addr("init vr", vr);
}

static void vring_notify(struct subchannel_id schid)
{
    virtio_notify(schid);
}

static void vring_send_buf(struct vring *vr, void *p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
    }

    vr->desc[vr->next_idx].addr = (ulong)p;
    vr->desc[vr->next_idx].len = len;
    vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
    vr->desc[vr->next_idx].next = vr->next_idx;
    vr->desc[vr->next_idx].next++;
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx++;
    }

    vr->used->idx = vr->next_idx;
}

static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}

static ulong get_second(void)
{
    return (get_clock() >> 12) / 1000000;
}

/*
 * Wait for the host to reply.
 *
 * timeout is in seconds if > 0.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(struct vring *vr, int timeout)
{
    ulong target_second = get_second() + timeout;
    struct subchannel_id schid = vr->schid;
    int r = 0;

    while (vr->used->idx == vr->next_idx) {
        vring_notify(schid);
        if (timeout && (get_second() >= target_second)) {
            r = 1;
            break;
        }
        yield();
    }

    vr->next_idx = 0;
    vr->desc[0].len = 0;
    vr->desc[0].flags = 0;

    return r;
}

/***********************************************
 *               Virtio block                  *
 ***********************************************/

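/*
 * A read request is queued as a chain of three descriptors (the usual
 * virtio-blk layout):
 *
 *   [0] struct virtio_blk_outhdr   request header, read by the host
 *   [1] data buffer                filled by the host (VRING_DESC_F_WRITE)
 *   [2] u8 status                  completion code, written by the host
 *
 * vring_send_buf() links them together: the first call records the chain
 * head in the avail ring, and the follow-up calls pass VRING_HIDDEN_IS_CHAIN
 * so that only the descriptors themselves are appended.
 */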
static int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    struct virtio_blk_outhdr out_hdr;
    u8 status;
    int r;

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = sector;

    vring_send_buf(&block, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(&block, load_addr, SECTOR_SIZE * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* status field */
    vring_send_buf(&block, &status, sizeof(u8), VRING_DESC_F_WRITE |
                   VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply(&block, 0);

    r = drain_irqs(block.schid);
    if (r) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }
    return status;
}

unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
                                 ulong subchan_id, void *load_addr)
{
    u8 status;
    int sec = rec_list1;
    int sec_num = (((rec_list2 >> 32) + 1) & 0xffff);
    int sec_len = rec_list2 >> 48;
    ulong addr = (ulong)load_addr;

    if (sec_len != SECTOR_SIZE) {
        return -1;
    }

    sclp_print(".");
    status = virtio_read_many(sec, (void *)addr, sec_num);
    if (status) {
        virtio_panic("I/O Error");
    }
    addr += sec_num * SECTOR_SIZE;

    return addr;
}

int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}

void virtio_setup_block(struct subchannel_id schid)
{
    struct vq_info_block info;
    struct vq_config_block config = {};

    virtio_reset(schid);

    config.index = 0;
    if (run_ccw(schid, CCW_CMD_READ_VQ_CONF, &config, sizeof(config))) {
        virtio_panic("Could not get block device configuration\n");
    }
    vring_init(&block, config.num, (void *)(100 * 1024 * 1024),
               KVM_S390_VIRTIO_RING_ALIGN);

    info.queue = (100ULL * 1024ULL * 1024ULL);
    info.align = KVM_S390_VIRTIO_RING_ALIGN;
    info.index = 0;
    info.num = config.num;
    block.schid = schid;

    if (!run_ccw(schid, CCW_CMD_SET_VQ, &info, sizeof(info))) {
        virtio_set_status(schid, VIRTIO_CONFIG_S_DRIVER_OK);
    }
}

bool virtio_is_blk(struct subchannel_id schid)
{
    int r;
    struct senseid senseid = {};

    /* run sense id command */
    r = run_ccw(schid, CCW_CMD_SENSE_ID, &senseid, sizeof(senseid));
    if (r) {
        return false;
    }
    if ((senseid.cu_type != 0x3832) || (senseid.cu_model != VIRTIO_ID_BLOCK)) {
        return false;
    }

    return true;
}
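
/*
 * Illustrative call sequence (a sketch only; the boot code that actually
 * drives this driver lives outside this file):
 *
 *   if (virtio_is_blk(schid)) {          probe the subchannel via SENSE ID
 *       virtio_setup_block(schid);       reset the device and set up the vring
 *       virtio_read(0, load_addr);       read one sector into load_addr
 *   }
 */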