1 /* 2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator 3 * 4 * PAPR Virtual SCSI, aka ibmvscsi 5 * 6 * Copyright (c) 2010,2011 Benjamin Herrenschmidt, IBM Corporation. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and associated documentation files (the "Software"), to deal 10 * in the Software without restriction, including without limitation the rights 11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 * copies of the Software, and to permit persons to whom the Software is 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice shall be included in 16 * all copies or substantial portions of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 * THE SOFTWARE. 
 *
 * TODO:
 *
 * - Cleanups :-)
 * - Sort out better how to assign devices to VSCSI instances
 * - Fix residual counts
 * - Add indirect descriptors support
 * - Maybe do autosense (PAPR seems to mandate it, linux doesn't care)
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "srp.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "viosrp.h"
#include "trace.h"

#include <libfdt.h>
#include "qom/object.h"

/*
 * Virtual SCSI device
 */

/* Random numbers */
#define VSCSI_MAX_SECTORS       4096
#define VSCSI_REQ_LIMIT         24

/* Maximum size of a IU payload */
#define SRP_MAX_IU_DATA_LEN     (SRP_MAX_IU_LEN - sizeof(union srp_iu))
#define SRP_RSP_SENSE_DATA_LEN  18

#define SRP_REPORT_LUNS_WLUN    0xc10100000000000ULL

/* One guest CRQ entry: structured view overlaid on the 16 raw bytes */
typedef union vscsi_crq {
    struct viosrp_crq s;
    uint8_t raw[16];
} vscsi_crq;

/* Per-request state: the guest's CRQ + IU plus our SCSI/RDMA tracking */
typedef struct vscsi_req {
    vscsi_crq crq;                          /* CRQ that delivered this IU */
    uint8_t viosrp_iu_buf[SRP_MAX_IU_LEN];  /* raw SRP/MAD IU from the guest */

    /* SCSI request tracking */
    SCSIRequest *sreq;
    uint32_t qtag; /* qemu tag != srp tag */
    bool active;
    bool writing;
    bool dma_error;
    uint32_t data_len;
    uint32_t senselen;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];

    /* RDMA related bits */
    uint8_t dma_fmt;            /* SRP_NO_DATA_DESC / DIRECT / INDIRECT */
    uint16_t local_desc;        /* descriptors embedded in the IU */
    uint16_t total_desc;        /* total descriptors (incl. external table) */
    uint16_t cdb_offset;        /* offset of descriptors past the CDB */
    uint16_t cur_desc_num;      /* transfer cursor: descriptor index */
    uint16_t cur_desc_offset;   /* transfer cursor: offset within descriptor */
} vscsi_req;

#define TYPE_VIO_SPAPR_VSCSI_DEVICE "spapr-vscsi"
OBJECT_DECLARE_SIMPLE_TYPE(VSCSIState, VIO_SPAPR_VSCSI_DEVICE)

struct VSCSIState {
    SpaprVioDevice vdev;
    SCSIBus bus;
    vscsi_req reqs[VSCSI_REQ_LIMIT];
};

/* View the raw IU buffer as a decoded SRP/MAD information unit. */
static union viosrp_iu *req_iu(vscsi_req *req)
{
    return (union viosrp_iu *)req->viosrp_iu_buf;
}

/* Claim a free request slot, or return NULL if all are in flight. */
static struct vscsi_req *vscsi_get_req(VSCSIState *s)
{
    vscsi_req
*req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (!req->active) {
            memset(req, 0, sizeof(*req));
            req->qtag = i;
            req->active = 1;
            return req;
        }
    }
    return NULL;
}

/*
 * Find the request whose IU carries the given SRP tag.
 * NOTE(review): inactive slots are not skipped here, so a recycled slot
 * whose stale IU happens to hold the same tag could match — confirm
 * this is intended before relying on it.
 */
static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (req_iu(req)->srp.cmd.tag == srp_tag) {
            return req;
        }
    }
    return NULL;
}

/* Release a request slot, dropping our reference on any SCSI request. */
static void vscsi_put_req(vscsi_req *req)
{
    if (req->sreq != NULL) {
        scsi_req_unref(req->sreq);
    }
    req->sreq = NULL;
    req->active = 0;
}

/*
 * Decode an SRP LUN (SAM addressing formats, selected by the top two
 * bits) into channel/id/lun and look the device up on our bus.
 * Returns NULL when no device matches (*lun is -1 for the extended
 * format case 3).
 */
static SCSIDevice *vscsi_device_find(SCSIBus *bus, uint64_t srp_lun, int *lun)
{
    int channel = 0, id = 0;

retry:
    switch (srp_lun >> 62) {
    case 0:
        if ((srp_lun >> 56) != 0) {
            channel = (srp_lun >> 56) & 0x3f;
            id = (srp_lun >> 48) & 0xff;
            srp_lun <<= 16;
            goto retry; /* re-decode the next addressing level */
        }
        *lun = (srp_lun >> 48) & 0xff;
        break;

    case 1:
        *lun = (srp_lun >> 48) & 0x3fff;
        break;
    case 2:
        channel = (srp_lun >> 53) & 0x7;
        id = (srp_lun >> 56) & 0x3f;
        *lun = (srp_lun >> 48) & 0x1f;
        break;
    case 3:
        *lun = -1;
        return NULL;
    default:
        abort();
    }

    return scsi_device_find(bus, channel, id, *lun);
}

/*
 * DMA the response IU back to the guest buffer named by the original
 * CRQ, then post a response CRQ for it.  Returns the DMA write status
 * (the CRQ carries VIOSRP_ADAPTER_FAIL if that write failed), or the
 * CRQ send status if that failed.
 */
static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
                         uint64_t length, uint8_t format)
{
    long rc, rc1;

    assert(length <= SRP_MAX_IU_LEN);

    /* First copy the SRP */
    rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
                             &req->viosrp_iu_buf, length);
    if (rc) {
        fprintf(stderr, "vscsi_send_iu: DMA write failure !\n");
    }

    req->crq.s.valid = 0x80;
    req->crq.s.format = format;
    req->crq.s.reserved = 0x00;
    req->crq.s.timeout = cpu_to_be16(0x0000);
    req->crq.s.IU_length = cpu_to_be16(length);
    req->crq.s.IU_data_ptr =
req_iu(req)->srp.rsp.tag; /* right byte order */

    if (rc == 0) {
        req->crq.s.status = VIOSRP_OK;
    } else {
        req->crq.s.status = VIOSRP_ADAPTER_FAIL;
    }

    rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw);
    if (rc1) {
        fprintf(stderr, "vscsi_send_iu: Error sending response\n");
        return rc1;
    }

    return rc;
}

/*
 * Build 18 bytes of fixed-format sense data in req->sense for a
 * synthesized CHECK CONDITION response.
 */
static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req,
                               uint8_t key, uint8_t asc, uint8_t ascq)
{
    req->senselen = SRP_RSP_SENSE_DATA_LEN;

    /* Valid bit and 'current errors' */
    req->sense[0] = (0x1 << 7 | 0x70);
    /* Sense key */
    req->sense[2] = key;
    /* Additional sense length */
    req->sense[7] = 0xa; /* 10 bytes */
    /* Additional sense code */
    req->sense[12] = asc;
    req->sense[13] = ascq;
}

/*
 * Send an SRP_RSP IU reporting @status plus data-in/data-out residual
 * counts (negative residual = underflow).  Sense data gathered in
 * req->sense is appended on non-GOOD status.
 */
static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
                          uint8_t status, int32_t res_in, int32_t res_out)
{
    union viosrp_iu *iu = req_iu(req);
    uint64_t tag = iu->srp.rsp.tag;
    int total_len = sizeof(iu->srp.rsp);
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_send_rsp(status, res_in, res_out);

    memset(iu, 0, sizeof(struct srp_rsp));
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;

    /* Handle residuals */
    if (res_in < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIUNDER;
        res_in = -res_in;
    } else if (res_in) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
    }
    if (res_out < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOUNDER;
        res_out = -res_out;
    } else if (res_out) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOOVER;
    }
    iu->srp.rsp.data_in_res_cnt = cpu_to_be32(res_in);
    iu->srp.rsp.data_out_res_cnt = cpu_to_be32(res_out);

    /* We don't do response data */
    /* iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; */
    iu->srp.rsp.resp_data_len = cpu_to_be32(0);

    /* Handle success vs.
failure */
    iu->srp.rsp.status = status;
    if (status) {
        /* solicited-notification bit 2 selects the "error" event */
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
        if (req->senselen) {
            int sense_data_len = MIN(req->senselen, SRP_MAX_IU_DATA_LEN);

            iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            iu->srp.rsp.sense_data_len = cpu_to_be32(sense_data_len);
            memcpy(iu->srp.rsp.data, req->sense, sense_data_len);
            total_len += sense_data_len;
        }
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    vscsi_send_iu(s, req, total_len, VIOSRP_SRP_FORMAT);
    return 0;
}

/* Byte-swap a direct descriptor from SRP wire (big-endian) to host order. */
static inline struct srp_direct_buf vscsi_swap_desc(struct srp_direct_buf desc)
{
    desc.va = be64_to_cpu(desc.va);
    desc.len = be32_to_cpu(desc.len);
    return desc;
}

/*
 * Fetch descriptor number @n of the request's data transfer into *@ret
 * (host byte order), skipping the @buf_offset bytes already consumed.
 * Indirect descriptors beyond those embedded in the IU are DMA-read
 * from the guest's external table.
 * Returns 1 when a non-empty descriptor was produced, 0 when there is
 * no (more) data, -1 on error.
 */
static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
                            unsigned n, unsigned buf_offset,
                            struct srp_direct_buf *ret)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC: {
        trace_spapr_vscsi_fetch_desc_no_data();
        return 0;
    }
    case SRP_DATA_DESC_DIRECT: {
        memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
        assert(req->cur_desc_num == 0);
        trace_spapr_vscsi_fetch_desc_direct();
        break;
    }
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *tmp = (struct srp_indirect_buf *)
            (cmd->add_data + req->cdb_offset);
        if (n < req->local_desc) {
            /* Descriptor is embedded in the IU itself */
            *ret = tmp->desc_list[n];
            trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n,
                                                  req->local_desc);
        } else if (n < req->total_desc) {
            /* Descriptor lives in the guest-side external table */
            int rc;
            struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
            unsigned desc_offset = n * sizeof(struct srp_direct_buf);

            if (desc_offset >= tbl_desc.len) {
                trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset);
                return -1;
            }
            rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
                                    ret, sizeof(struct srp_direct_buf));
            if (rc) {
trace_spapr_vscsi_fetch_desc_dma_read_error(rc);
                return -1;
            }
            trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n,
                                                          req->total_desc,
                                                          tbl_desc.va,
                                                          tbl_desc.len);
        } else {
            trace_spapr_vscsi_fetch_desc_out_of_desc();
            return 0;
        }
        break;
    }
    default:
        fprintf(stderr, "VSCSI: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    *ret = vscsi_swap_desc(*ret);
    if (buf_offset > ret->len) {
        trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset,
                                                          req->cur_desc_num,
                                                          ret->len);
        return -1;
    }
    /* Skip what was already transferred through this descriptor */
    ret->va += buf_offset;
    ret->len -= buf_offset;

    trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset,
                                      ret->va, ret->len);

    return ret->len ? 1 : 0;
}

/*
 * Move up to @len bytes through the single direct descriptor.
 * Returns the byte count actually moved, 0 when the descriptor is
 * exhausted, -1 on descriptor or DMA error.
 */
static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req,
                                 uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    uint32_t llen;
    int rc = 0;

    rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
    if (rc < 0) {
        return -1;
    } else if (rc == 0) {
        return 0;
    }

    llen = MIN(len, md.len);
    if (llen) {
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
    }

    if (rc) {
        return -1;
    }
    req->cur_desc_offset += llen;

    return llen;
}

/*
 * Move up to @len bytes walking the indirect descriptor list,
 * advancing the cur_desc_num/cur_desc_offset cursor as descriptors
 * fill up.  Returns bytes moved or -1 on error.
 */
static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
                                   uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    int rc = 0;
    uint32_t llen, total = 0;

    trace_spapr_vscsi_srp_indirect_data(len);

    /* While we have data ...
*/
    while (len) {
        rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
        if (rc < 0) {
            return -1;
        } else if (rc == 0) {
            break;
        }

        /* Perform transfer */
        llen = MIN(len, md.len);
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
        if (rc) {
            trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc);
            break;
        }
        trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]);

        len -= llen;
        buf += llen;

        total += llen;

        /* Update current position in the current descriptor */
        req->cur_desc_offset += llen;
        if (md.len == llen) {
            /* Go to the next descriptor if the current one finished */
            ++req->cur_desc_num;
            req->cur_desc_offset = 0;
        }
    }

    return rc ? -1 : total;
}

/*
 * Dispatch a chunk transfer according to the request's descriptor
 * format.  Returns bytes transferred (0 for the no-data case) or -1
 * on error.
 */
static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
                                   int writing, uint8_t *buf, uint32_t len)
{
    int err = 0;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        trace_spapr_vscsi_srp_transfer_data(len);
        break;
    case SRP_DATA_DESC_DIRECT:
        err = vscsi_srp_direct_data(s, req, buf, len);
        break;
    case SRP_DATA_DESC_INDIRECT:
        err = vscsi_srp_indirect_data(s, req, buf, len);
        break;
    }
    return err;
}

/* Bits from linux srp */
/* Size of the data-out descriptor area following the CDB in an SRP_CMD. */
static int data_out_desc_size(struct srp_cmd *cmd)
{
    int size = 0;
    uint8_t fmt = cmd->buf_fmt >> 4;

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        size = sizeof(struct srp_direct_buf);
        break;
    case SRP_DATA_DESC_INDIRECT:
        size = sizeof(struct srp_indirect_buf) +
            sizeof(struct srp_direct_buf)*cmd->data_out_desc_cnt;
        break;
    default:
        break;
    }
    return size;
}

/*
 * Cache the descriptor format, offsets and counts for the request's
 * data direction.  req->writing must be set before calling this.
 */
static int vscsi_preprocess_desc(vscsi_req *req)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    /* add_cdb_len is in 4-byte units; mask to get the byte offset */
    req->cdb_offset = cmd->add_cdb_len & ~3;

    if (req->writing) {
        req->dma_fmt = cmd->buf_fmt >> 4;
    } else {
        /* Data-in descriptors follow any data-out descriptors */
        req->cdb_offset += data_out_desc_size(cmd);
        req->dma_fmt = cmd->buf_fmt & ((1U << 4) - 1);
    }

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        req->total_desc = req->local_desc = 1;
        break;
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *ind_tmp = (struct srp_indirect_buf *)
            (cmd->add_data + req->cdb_offset);

        req->total_desc = be32_to_cpu(ind_tmp->table_desc.len) /
            sizeof(struct srp_direct_buf);
        req->local_desc = req->writing ? cmd->data_out_desc_cnt :
            cmd->data_in_desc_cnt;
        break;
    }
    default:
        fprintf(stderr,
                "vscsi_preprocess_desc: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    return 0;
}

/* Callback to indicate that the SCSI layer has completed a transfer. */
static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    uint8_t *buf;
    int rc = 0;

    trace_spapr_vscsi_transfer_data(sreq->tag, len, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (len) {
        buf = scsi_req_get_buf(sreq);
        rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len);
    }
    if (rc < 0) {
        /* RDMA failed: remember it and cancel; the cancel callback
         * will report CHECK CONDITION to the guest.
         */
        fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc);
        req->dma_error = true;
        scsi_req_cancel(req->sreq);
        return;
    }

    /* Start next chunk */
    req->data_len -= rc;
    scsi_req_continue(sreq);
}

/* Callback to indicate that the SCSI layer has completed a command.
*/
static void vscsi_command_complete(SCSIRequest *sreq, size_t resid)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    int32_t res_in = 0, res_out = 0;

    trace_spapr_vscsi_command_complete(sreq->tag, sreq->status, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (sreq->status == CHECK_CONDITION) {
        req->senselen = scsi_req_get_sense(req->sreq, req->sense,
                                           sizeof(req->sense));
        trace_spapr_vscsi_command_complete_sense_data1(req->senselen,
                req->sense[0], req->sense[1], req->sense[2], req->sense[3],
                req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
        trace_spapr_vscsi_command_complete_sense_data2(
                req->sense[8], req->sense[9], req->sense[10], req->sense[11],
                req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
    }

    trace_spapr_vscsi_command_complete_status(sreq->status);
    if (sreq->status == 0) {
        /* We handle overflows, not underflows for normal commands,
         * but hopefully nobody cares
         */
        if (req->writing) {
            res_out = req->data_len;
        } else {
            res_in = req->data_len;
        }
    }
    vscsi_send_rsp(s, req, sreq->status, res_in, res_out);
    vscsi_put_req(req);
}

/*
 * Cancellation callback.  If we cancelled because of a DMA error,
 * report CHECK CONDITION / HARDWARE ERROR to the guest; then release
 * the slot either way.
 */
static void vscsi_request_cancelled(SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    if (req->dma_error) {
        VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);

        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    }
    vscsi_put_req(req);
}

/* Migration state for one in-flight request */
static const VMStateDescription vmstate_spapr_vscsi_req = {
    .name = "spapr_vscsi_req",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(crq.raw, vscsi_req),
        VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req),
        VMSTATE_UINT32(qtag, vscsi_req),
VMSTATE_BOOL(active, vscsi_req),
        VMSTATE_UINT32(data_len, vscsi_req),
        VMSTATE_BOOL(writing, vscsi_req),
        VMSTATE_UINT32(senselen, vscsi_req),
        VMSTATE_BUFFER(sense, vscsi_req),
        VMSTATE_UINT8(dma_fmt, vscsi_req),
        VMSTATE_UINT16(local_desc, vscsi_req),
        VMSTATE_UINT16(total_desc, vscsi_req),
        VMSTATE_UINT16(cdb_offset, vscsi_req),
        /* Restart SCSI request from the beginning for now */
        /*VMSTATE_UINT16(cur_desc_num, vscsi_req),
        VMSTATE_UINT16(cur_desc_offset, vscsi_req),*/
        VMSTATE_END_OF_LIST()
    },
};

/* Migration: serialize one in-flight request. */
static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;
    assert(req->active);

    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);

    trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);
}

/*
 * Migration: restore one request into the slot named by the SCSI tag
 * (which is our qtag).  Returns the hba_private pointer, or NULL on
 * load failure.
 */
static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent);
    vscsi_req *req;
    int rc;

    assert(sreq->tag < VSCSI_REQ_LIMIT);
    req = &s->reqs[sreq->tag];
    assert(!req->active);

    memset(req, 0, sizeof(*req));
    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1);
    if (rc) {
        fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag);
        return NULL;
    }
    assert(req->active);

    req->sreq = scsi_req_ref(sreq);

    trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);

    return req;
}

/* Answer an SRP_LOGIN_REQ with our limits and buffer formats. */
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    struct srp_login_rsp *rsp = &iu->srp.login_rsp;
    uint64_t tag = iu->srp.rsp.tag;

    trace_spapr_vscsi_process_login();

    /* TODO handle case that requested size is wrong and
     * buffer format is wrong
     */
    memset(iu, 0, sizeof(struct srp_login_rsp));
    rsp->opcode = SRP_LOGIN_RSP;
    /* Don't advertise quite as many request as we support to
     * keep room for management stuff etc...
     */
    rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT-2);
    rsp->tag = tag;
    rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    /* direct and indirect */
    rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);

    vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}

/*
 * Answer INQUIRY for a LUN with no backing device: fake up standard
 * INQUIRY data with PQ=3 ("not capable of supporting a device here").
 */
static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
{
    uint8_t *cdb = req_iu(req)->srp.cmd.cdb;
    uint8_t resp_data[36];
    int rc, len, alen;

    /* We don't do EVPD. Also check that page_code is 0 */
    if ((cdb[1] & 0x01) || cdb[2] != 0) {
        /* Send INVALID FIELD IN CDB */
        vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        return;
    }
    alen = cdb[3];
    alen = (alen << 8) | cdb[4];
    len = MIN(alen, 36);

    /* Fake up inquiry using PQ=3 */
    memset(resp_data, 0, 36);
    resp_data[0] = 0x7f;   /* Not capable of supporting a device here */
    resp_data[2] = 0x06;   /* SPS-4 */
    resp_data[3] = 0x02;   /* Resp data format */
    resp_data[4] = 36 - 5; /* Additional length */
    resp_data[7] = 0x10;   /* Sync transfers */
    /* Literals are space-padded to the 16/8 bytes memcpy consumes */
    memcpy(&resp_data[16], "QEMU EMPTY      ", 16);
    memcpy(&resp_data[8], "QEMU    ", 8);

    req->writing = 0;
    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, 36 - rc, 0);
    }
}

/*
 * Handle REPORT LUNS ourselves: build the LUN list for every device
 * on our bus, synthesizing LUN 0 if no device provides it.
 */
static void vscsi_report_luns(VSCSIState *s, vscsi_req *req)
{
    BusChild *kid;
    int i, len, n, rc;
    uint8_t *resp_data;
    bool found_lun0;

    n = 0;
    found_lun0 = false;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        SCSIDevice *dev
= SCSI_DEVICE(kid->child);

        n += 8;
        if (dev->channel == 0 && dev->id == 0 && dev->lun == 0) {
            found_lun0 = true;
        }
    }
    if (!found_lun0) {
        n += 8; /* reserve an extra entry for the synthesized LUN 0 */
    }
    len = n+8;  /* 8-byte REPORT LUNS header + n bytes of entries */

    resp_data = g_malloc0(len);
    stl_be_p(resp_data, n);
    /* Entry 1 (offset 8) is the implicit LUN 0 unless a device has it */
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->id == 0 && dev->channel == 0) {
            resp_data[i] = 0;        /* Use simple LUN for 0 (SAM5 4.7.7.1) */
        } else {
            resp_data[i] = (2 << 6); /* Otherwise LUN addressing (4.7.7.4) */
        }
        resp_data[i] |= dev->id;
        resp_data[i+1] = (dev->channel << 5);
        resp_data[i+1] |= dev->lun;
        i += 8;
    }

    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    g_free(resp_data);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, len - rc, 0);
    }
}

/*
 * Queue an SRP_CMD with the SCSI layer, or emulate it locally
 * (REPORT LUNS, INQUIRY to an absent target).  Returns 1 when the
 * request completed here, 0 when completion comes via the SCSI
 * callbacks.
 */
static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    SCSIDevice *sdev;
    int n, lun;

    if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN)
        && srp->cmd.cdb[0] == REPORT_LUNS) {
        vscsi_report_luns(s, req);
        return 0;
    }

    sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
    if (!sdev) {
        trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun));
        if (srp->cmd.cdb[0] == INQUIRY) {
            vscsi_inquiry_no_target(s, req);
        } else {
            vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00);
            vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        }
        return 1;
    }

    req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req);
    n = scsi_req_enqueue(req->sreq);

    trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0],
                                scsi_command_name(srp->cmd.cdb[0]), lun, n);
    if (n) {
        /* Transfer direction must be set before preprocessing the
         * descriptors
         */
        req->writing = (n < 1);

        /* Preprocess RDMA descriptors */
        vscsi_preprocess_desc(req);

        /* Get transfer direction and initiate transfer */
        if (n > 0) {
            req->data_len = n;
        } else if (n < 0) {
            req->data_len = -n;
        }
        scsi_req_continue(req->sreq);
    }
    /* Don't touch req here, it may have been recycled already */

    return 0;
}

/*
 * Handle an SRP task-management request (abort task, LUN reset,
 * abort/clear task set, ...).  Always completes here and returns 1 so
 * the caller releases the slot.
 */
static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    vscsi_req *tmpreq;
    int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE;
    SCSIDevice *d;
    uint64_t tag = iu->srp.rsp.tag;
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_process_tsk_mgmt(iu->srp.tsk_mgmt.tsk_mgmt_func);
    d = vscsi_device_find(&s->bus,
                          be64_to_cpu(req_iu(req)->srp.tsk_mgmt.lun), &lun);
    if (!d) {
        resp = SRP_TSK_MGMT_FIELDS_INVALID;
    } else {
        switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
        case SRP_TSK_ABORT_TASK:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            tmpreq = vscsi_find_req(s, req_iu(req)->srp.tsk_mgmt.task_tag);
            if (tmpreq && tmpreq->sreq) {
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_LUN_RESET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            qdev_reset_all(&d->qdev);
            break;

        case SRP_TSK_ABORT_TASK_SET:
        case SRP_TSK_CLEAR_TASK_SET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            /* Cancel every active request addressed to the same LUN */
            for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
                tmpreq = &s->reqs[i];
                if (req_iu(tmpreq)->srp.cmd.lun
                    != req_iu(req)->srp.tsk_mgmt.lun) {
                    continue;
                }
                if (!tmpreq->active || !tmpreq->sreq) {
                    continue;
                }
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_CLEAR_ACA:
            resp = SRP_TSK_MGMT_NOT_SUPPORTED;
            break;

        default:
            resp = SRP_TSK_MGMT_FIELDS_INVALID;
            break;
        }
    }

    /* Compose the response here as */
    QEMU_BUILD_BUG_ON(SRP_MAX_IU_DATA_LEN < 4);
    memset(iu, 0, sizeof(struct srp_rsp) + 4);
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;
    iu->srp.rsp.flags |= SRP_RSP_FLAG_RSPVALID;
    iu->srp.rsp.resp_data_len = cpu_to_be32(4);
    if (resp) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    iu->srp.rsp.status = GOOD;
    /* The 4-byte response data carries the task-management result */
    iu->srp.rsp.data[3] = resp;

    vscsi_send_iu(s, req, sizeof(iu->srp.rsp) + 4, VIOSRP_SRP_FORMAT);

    return 1;
}

/*
 * Dispatch an SRP-format IU by opcode.  Returns whether the request
 * is fully finished (1) or still owned by the SCSI layer (0).
 */
static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    int done = 1;
    uint8_t opcode = srp->rsp.opcode;

    switch (opcode) {
    case SRP_LOGIN_REQ:
        vscsi_process_login(s, req);
        break;
    case SRP_TSK_MGMT:
        done = vscsi_process_tsk_mgmt(s, req);
        break;
    case SRP_CMD:
        done = vscsi_queue_cmd(s, req);
        break;
    case SRP_LOGIN_RSP:
    case SRP_I_LOGOUT:
    case SRP_T_LOGOUT:
    case SRP_RSP:
    case SRP_CRED_REQ:
    case SRP_CRED_RSP:
    case SRP_AER_REQ:
    case SRP_AER_RSP:
        fprintf(stderr, "VSCSI: Unsupported opcode %02x\n", opcode);
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown type %02x\n", opcode);
    }

    return done;
}

/* MAD adapter-info request: report our identity and limits to the guest. */
static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_adapter_info *sinfo;
    struct mad_adapter_info_data info;
    int rc;

    sinfo = &req_iu(req)->mad.adapter_info;

#if 0 /* What for ?
*/
    rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
                            &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n");
    }
#endif
    memset(&info, 0, sizeof(info));
    strcpy(info.srp_version, SRP_VERSION);
    memcpy(info.partition_name, "qemu", sizeof("qemu"));
    info.partition_number = cpu_to_be32(0);
    info.mad_version = cpu_to_be32(1);
    info.os_type = cpu_to_be32(2);
    /* Largest transfer we accept, in bytes (sectors << 9) */
    info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9);

    rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
                             &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n");
    }

    sinfo->common.status = rc ? cpu_to_be32(1) : 0;

    return vscsi_send_iu(s, req, sizeof(*sinfo), VIOSRP_MAD_FORMAT);
}

/*
 * MAD capabilities request: read the guest's capabilities buffer and
 * answer that no migration/reservation capability is supported.
 */
static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_capabilities *vcap;
    struct capabilities cap = { };
    uint16_t len, req_len;
    uint64_t buffer;
    int rc;

    vcap = &req_iu(req)->mad.capabilities;
    req_len = len = be16_to_cpu(vcap->common.length);
    buffer = be64_to_cpu(vcap->buffer);
    if (len > sizeof(cap)) {
        fprintf(stderr, "vscsi_send_capabilities: capabilities size mismatch !\n");

        /*
         * Just read and populate the structure that is known.
         * Zero rest of the structure.
         */
        len = sizeof(cap);
    }
    rc = spapr_vio_dma_read(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA read failure !\n");
    }

    /*
     * Current implementation does not support any migration or
     * reservation capabilities. Construct the response telling the
     * guest not to use them.
     */
    cap.flags = 0;
    cap.migration.ecl = 0;
    cap.reserve.type = 0;
    cap.migration.common.server_support = 0;
    cap.reserve.common.server_support = 0;

    rc = spapr_vio_dma_write(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA write failure !\n");
    }
    if (req_len > len) {
        /*
         * Being paranoid and lets not worry about the error code
         * here. Actual write of the cap is done above.
         */
        spapr_vio_dma_set(&s->vdev, (buffer + len), 0, (req_len - len));
    }
    vcap->common.status = rc ? cpu_to_be32(1) : 0;
    return vscsi_send_iu(s, req, sizeof(*vcap), VIOSRP_MAD_FORMAT);
}

/* Dispatch a MAD-format IU by type.  Always returns 1 (request done). */
static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req)
{
    union mad_iu *mad = &req_iu(req)->mad;
    bool request_handled = false;
    uint64_t retlen = 0;

    switch (be32_to_cpu(mad->empty_iu.common.type)) {
    case VIOSRP_EMPTY_IU_TYPE:
        fprintf(stderr, "Unsupported EMPTY MAD IU\n");
        retlen = sizeof(mad->empty_iu);
        break;
    case VIOSRP_ERROR_LOG_TYPE:
        fprintf(stderr, "Unsupported ERROR LOG MAD IU\n");
        retlen = sizeof(mad->error_log);
        break;
    case VIOSRP_ADAPTER_INFO_TYPE:
        vscsi_send_adapter_info(s, req);
        request_handled = true;
        break;
    case VIOSRP_HOST_CONFIG_TYPE:
        retlen = sizeof(mad->host_config);
        break;
    case VIOSRP_CAPABILITIES_TYPE:
        vscsi_send_capabilities(s, req);
        request_handled = true;
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown MAD type %02x\n",
                be32_to_cpu(mad->empty_iu.common.type));
        /*
         * PAPR+ says that "The length field is set to the length
         * of the data structure(s) used in the command".
         * As we did not recognize the request type, put zero there.
         */
        retlen = 0;
    }

    if (!request_handled) {
        /* Unhandled types still get a NOT SUPPORTED status response */
        mad->empty_iu.common.status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
        vscsi_send_iu(s, req, retlen, VIOSRP_MAD_FORMAT);
    }

    return 1;
}

/*
 * Fetch the IU named by a payload CRQ into a fresh request slot and
 * route it to the SRP or MAD handler; release the slot immediately
 * when the handler reports it is done.
 */
static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq)
{
    vscsi_req *req;
    int done;

    req = vscsi_get_req(s);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Failed to get a request !\n");
        return;
    }

    /* We only support a limited number of descriptors, we know
     * the ibmvscsi driver uses up to 10 max, so it should fit
     * in our 256 bytes IUs. If not we'll have to increase the size
     * of the structure.
     */
    if (crq->s.IU_length > SRP_MAX_IU_LEN) {
        fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n",
                crq->s.IU_length);
        vscsi_put_req(req);
        return;
    }

    /* XXX Handle failure differently ? */
    if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->viosrp_iu_buf,
                           crq->s.IU_length)) {
        fprintf(stderr, "vscsi_got_payload: DMA read failure !\n");
        vscsi_put_req(req);
        return;
    }
    memcpy(&req->crq, crq, sizeof(vscsi_crq));

    if (crq->s.format == VIOSRP_MAD_FORMAT) {
        done = vscsi_handle_mad_req(s, req);
    } else {
        done = vscsi_handle_srp_req(s, req);
    }

    if (done) {
        vscsi_put_req(req);
    }
}


/* CRQ entry point from the VIO layer: decode and dispatch one CRQ entry. */
static int vscsi_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    vscsi_crq crq;

    memcpy(crq.raw, crq_data, 16);
    /* Multi-byte CRQ fields arrive big-endian */
    crq.s.timeout = be16_to_cpu(crq.s.timeout);
    crq.s.IU_length = be16_to_cpu(crq.s.IU_length);
    crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr);

    trace_spapr_vscsi_do_crq(crq.raw[0], crq.raw[1]);

    switch (crq.s.valid) {
    case 0xc0: /* Init command/response */

        /* Respond to initialization request */
        if (crq.s.format ==
0x01) { 1147 memset(crq.raw, 0, 16); 1148 crq.s.valid = 0xc0; 1149 crq.s.format = 0x02; 1150 spapr_vio_send_crq(dev, crq.raw); 1151 } 1152 1153 /* Note that in hotplug cases, we might get a 0x02 1154 * as a result of us emitting the init request 1155 */ 1156 1157 break; 1158 case 0xff: /* Link event */ 1159 1160 /* Not handled for now */ 1161 1162 break; 1163 case 0x80: /* Payloads */ 1164 switch (crq.s.format) { 1165 case VIOSRP_SRP_FORMAT: /* AKA VSCSI request */ 1166 case VIOSRP_MAD_FORMAT: /* AKA VSCSI response */ 1167 vscsi_got_payload(s, &crq); 1168 break; 1169 case VIOSRP_OS400_FORMAT: 1170 case VIOSRP_AIX_FORMAT: 1171 case VIOSRP_LINUX_FORMAT: 1172 case VIOSRP_INLINE_FORMAT: 1173 fprintf(stderr, "vscsi_do_srq: Unsupported payload format %02x\n", 1174 crq.s.format); 1175 break; 1176 default: 1177 fprintf(stderr, "vscsi_do_srq: Unknown payload format %02x\n", 1178 crq.s.format); 1179 } 1180 break; 1181 default: 1182 fprintf(stderr, "vscsi_do_crq: unknown CRQ %02x %02x ...\n", 1183 crq.raw[0], crq.raw[1]); 1184 }; 1185 1186 return 0; 1187 } 1188 1189 static const struct SCSIBusInfo vscsi_scsi_info = { 1190 .tcq = true, 1191 .max_channel = 7, /* logical unit addressing format */ 1192 .max_target = 63, 1193 .max_lun = 31, 1194 1195 .transfer_data = vscsi_transfer_data, 1196 .complete = vscsi_command_complete, 1197 .cancel = vscsi_request_cancelled, 1198 .save_request = vscsi_save_request, 1199 .load_request = vscsi_load_request, 1200 }; 1201 1202 static void spapr_vscsi_reset(SpaprVioDevice *dev) 1203 { 1204 VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev); 1205 int i; 1206 1207 memset(s->reqs, 0, sizeof(s->reqs)); 1208 for (i = 0; i < VSCSI_REQ_LIMIT; i++) { 1209 s->reqs[i].qtag = i; 1210 } 1211 } 1212 1213 static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp) 1214 { 1215 VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev); 1216 1217 dev->crq.SendFunc = vscsi_do_crq; 1218 1219 scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info); 1220 1221 /* 
ibmvscsi SCSI bus does not allow hotplug. */ 1222 qbus_set_hotplug_handler(BUS(&s->bus), NULL); 1223 } 1224 1225 void spapr_vscsi_create(SpaprVioBus *bus) 1226 { 1227 DeviceState *dev; 1228 1229 dev = qdev_new("spapr-vscsi"); 1230 1231 qdev_realize_and_unref(dev, &bus->bus, &error_fatal); 1232 scsi_bus_legacy_handle_cmdline(&VIO_SPAPR_VSCSI_DEVICE(dev)->bus); 1233 } 1234 1235 static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off) 1236 { 1237 int ret; 1238 1239 ret = fdt_setprop_cell(fdt, node_off, "#address-cells", 2); 1240 if (ret < 0) { 1241 return ret; 1242 } 1243 1244 ret = fdt_setprop_cell(fdt, node_off, "#size-cells", 0); 1245 if (ret < 0) { 1246 return ret; 1247 } 1248 1249 return 0; 1250 } 1251 1252 static Property spapr_vscsi_properties[] = { 1253 DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev), 1254 DEFINE_PROP_END_OF_LIST(), 1255 }; 1256 1257 static const VMStateDescription vmstate_spapr_vscsi = { 1258 .name = "spapr_vscsi", 1259 .version_id = 1, 1260 .minimum_version_id = 1, 1261 .fields = (VMStateField[]) { 1262 VMSTATE_SPAPR_VIO(vdev, VSCSIState), 1263 /* VSCSI state */ 1264 /* ???? 
*/ 1265 1266 VMSTATE_END_OF_LIST() 1267 }, 1268 }; 1269 1270 static void spapr_vscsi_class_init(ObjectClass *klass, void *data) 1271 { 1272 DeviceClass *dc = DEVICE_CLASS(klass); 1273 SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); 1274 1275 k->realize = spapr_vscsi_realize; 1276 k->reset = spapr_vscsi_reset; 1277 k->devnode = spapr_vscsi_devnode; 1278 k->dt_name = "v-scsi"; 1279 k->dt_type = "vscsi"; 1280 k->dt_compatible = "IBM,v-scsi"; 1281 k->signal_mask = 0x00000001; 1282 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1283 device_class_set_props(dc, spapr_vscsi_properties); 1284 k->rtce_window_size = 0x10000000; 1285 dc->vmsd = &vmstate_spapr_vscsi; 1286 } 1287 1288 static const TypeInfo spapr_vscsi_info = { 1289 .name = TYPE_VIO_SPAPR_VSCSI_DEVICE, 1290 .parent = TYPE_VIO_SPAPR_DEVICE, 1291 .instance_size = sizeof(VSCSIState), 1292 .class_init = spapr_vscsi_class_init, 1293 }; 1294 1295 static void spapr_vscsi_register_types(void) 1296 { 1297 type_register_static(&spapr_vscsi_info); 1298 } 1299 1300 type_init(spapr_vscsi_register_types) 1301