xref: /openbmc/qemu/hw/scsi/scsi-bus.c (revision 58ee924b97d1c0898555647a31820c5a20d55a73)
1  #include "qemu/osdep.h"
2  #include "qapi/error.h"
3  #include "qemu/error-report.h"
4  #include "qemu/module.h"
5  #include "qemu/option.h"
6  #include "qemu/hw-version.h"
7  #include "hw/qdev-properties.h"
8  #include "hw/scsi/scsi.h"
9  #include "migration/qemu-file-types.h"
10  #include "migration/vmstate.h"
11  #include "scsi/constants.h"
12  #include "sysemu/block-backend.h"
13  #include "sysemu/blockdev.h"
14  #include "sysemu/sysemu.h"
15  #include "sysemu/runstate.h"
16  #include "trace.h"
17  #include "sysemu/dma.h"
18  #include "qemu/cutils.h"
19  
20  static char *scsibus_get_dev_path(DeviceState *dev);
21  static char *scsibus_get_fw_dev_path(DeviceState *dev);
22  static void scsi_req_dequeue(SCSIRequest *req);
23  static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
24  static void scsi_target_free_buf(SCSIRequest *req);
25  static void scsi_clear_reported_luns_changed(SCSIRequest *req);
26  
27  static int next_scsi_bus;
28  
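/*
 * Find a device on @bus that matches @channel, @id and @lun.  If no device
 * has the exact LUN, the first device matching channel and id is returned
 * instead.  Unless @include_unrealized is true, devices that have not been
 * realized yet are ignored.  The caller must hold the RCU read lock.
 */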
29  static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
30                                         int channel, int id, int lun,
31                                         bool include_unrealized)
32  {
33      BusChild *kid;
34      SCSIDevice *retval = NULL;
35  
36      QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
37          DeviceState *qdev = kid->child;
38          SCSIDevice *dev = SCSI_DEVICE(qdev);
39  
40          if (dev->channel == channel && dev->id == id) {
41              if (dev->lun == lun) {
42                  retval = dev;
43                  break;
44              }
45  
46              /*
47               * If we don't find an exact match (channel/id/lun),
48               * we will return the first device that matches channel/id.
49               */
50  
51              if (!retval) {
52                  retval = dev;
53              }
54          }
55      }
56  
57      /*
58       * This function might run on the IO thread and we might race against
59       * main thread hot-plugging the device.
60       * We assume that as soon as .realized is set to true we can let
61       * the user access the device.
62       */
63  
64      if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
65          retval = NULL;
66      }
67  
68      return retval;
69  }
70  
71  SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
72  {
73      RCU_READ_LOCK_GUARD();
74      return do_scsi_device_find(bus, channel, id, lun, false);
75  }
76  
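/*
 * Like scsi_device_find(), but also takes a reference to the returned
 * device; the caller must release it with object_unref().
 */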
77  SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
78  {
79      SCSIDevice *d;
80      RCU_READ_LOCK_GUARD();
81      d = do_scsi_device_find(bus, channel, id, lun, false);
82      if (d) {
83          object_ref(d);
84      }
85      return d;
86  }
87  
88  /*
89   * Invoke @fn() for each enqueued request in device @s. Must be called from the
90   * main loop thread while the guest is stopped. This is only suitable for
91   * vmstate ->put(); use scsi_device_for_each_req_async() for other cases.
92   */
93  static void scsi_device_for_each_req_sync(SCSIDevice *s,
94                                            void (*fn)(SCSIRequest *, void *),
95                                            void *opaque)
96  {
97      SCSIRequest *req;
98      SCSIRequest *next_req;
99  
100      assert(!runstate_is_running());
101      assert(qemu_in_main_thread());
102  
103      QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
104          fn(req, opaque);
105      }
106  }
107  
108  typedef struct {
109      SCSIDevice *s;
110      void (*fn)(SCSIRequest *, void *);
111      void *fn_opaque;
112  } SCSIDeviceForEachReqAsyncData;
113  
114  static void scsi_device_for_each_req_async_bh(void *opaque)
115  {
116      g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
117      SCSIDevice *s = data->s;
118      AioContext *ctx;
119      SCSIRequest *req;
120      SCSIRequest *next;
121  
122      /*
123       * The BB cannot have changed contexts between this BH being scheduled and
124       * now: BBs' AioContexts, when they have a node attached, can only be
125       * changed via bdrv_try_change_aio_context(), in a drained section.  While
126       * we have the in-flight counter incremented, that drain must block.
127       */
128      ctx = blk_get_aio_context(s->conf.blk);
129      assert(ctx == qemu_get_current_aio_context());
130  
131      QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
132          data->fn(req, data->fn_opaque);
133      }
134  
135      /* Drop the reference taken by scsi_device_for_each_req_async() */
136      object_unref(OBJECT(s));
137  
138      /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
139      blk_dec_in_flight(s->conf.blk);
140  }
141  
142  /*
143   * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
144   * runs in the AioContext that is executing the request.
145   * Keeps the BlockBackend's in-flight counter incremented until everything is
146   * done, so draining it will settle all scheduled @fn() calls.
147   */
148  static void scsi_device_for_each_req_async(SCSIDevice *s,
149                                             void (*fn)(SCSIRequest *, void *),
150                                             void *opaque)
151  {
152      assert(qemu_in_main_thread());
153  
154      SCSIDeviceForEachReqAsyncData *data =
155          g_new(SCSIDeviceForEachReqAsyncData, 1);
156  
157      data->s = s;
158      data->fn = fn;
159      data->fn_opaque = opaque;
160  
161      /*
162       * Hold a reference to the SCSIDevice until
163       * scsi_device_for_each_req_async_bh() finishes.
164       */
165      object_ref(OBJECT(s));
166  
167      /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
168      blk_inc_in_flight(s->conf.blk);
169      aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
170                              scsi_device_for_each_req_async_bh,
171                              data);
172  }
173  
174  static void scsi_device_realize(SCSIDevice *s, Error **errp)
175  {
176      SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
177      if (sc->realize) {
178          sc->realize(s, errp);
179      }
180  }
181  
182  static void scsi_device_unrealize(SCSIDevice *s)
183  {
184      SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
185      if (sc->unrealize) {
186          sc->unrealize(s);
187      }
188  }
189  
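/*
 * Parse a CDB on behalf of an HBA: run the common parser first, then let
 * the bus' parse_cdb hook (if any) override the result.
 */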
190  int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
191                         size_t buf_len, void *hba_private)
192  {
193      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
194      int rc;
195  
196      assert(cmd->len == 0);
197      rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
198      if (bus->info->parse_cdb) {
199          rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
200      }
201      return rc;
202  }
203  
204  static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
205                                            uint8_t *buf, void *hba_private)
206  {
207      SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
208      if (sc->alloc_req) {
209          return sc->alloc_req(s, tag, lun, buf, hba_private);
210      }
211  
212      return NULL;
213  }
214  
215  void scsi_device_unit_attention_reported(SCSIDevice *s)
216  {
217      SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
218      if (sc->unit_attention_reported) {
219          sc->unit_attention_reported(s);
220      }
221  }
222  
223  /* Create a scsi bus, and attach devices to it.  */
224  void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
225                           const SCSIBusInfo *info, const char *bus_name)
226  {
227      qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
228      bus->busnr = next_scsi_bus++;
229      bus->info = info;
230      qbus_set_bus_hotplug_handler(BUS(bus));
231  }
232  
233  void scsi_req_retry(SCSIRequest *req)
234  {
235      req->retry = true;
236  }
237  
238  /* Called in the AioContext that is executing the request */
239  static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
240  {
241      scsi_req_ref(req);
242      if (req->retry) {
243          req->retry = false;
244          switch (req->cmd.mode) {
245              case SCSI_XFER_FROM_DEV:
246              case SCSI_XFER_TO_DEV:
247                  scsi_req_continue(req);
248                  break;
249              case SCSI_XFER_NONE:
250                  scsi_req_dequeue(req);
251                  scsi_req_enqueue(req);
252                  break;
253          }
254      }
255      scsi_req_unref(req);
256  }
257  
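/*
 * VM run state change handler: once the guest starts running again,
 * schedule scsi_dma_restart_req() for every queued request so that
 * requests flagged with scsi_req_retry() are re-issued.
 */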
258  static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
259  {
260      SCSIDevice *s = opaque;
261  
262      assert(qemu_in_main_thread());
263  
264      if (!running) {
265          return;
266      }
267  
268      scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
269  }
270  
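/*
 * Return true if no device (realized or not) occupies the given
 * channel/target/lun address.  If the address is taken and @p_dev is
 * non-NULL, *p_dev is set to the conflicting device.
 */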
271  static bool scsi_bus_is_address_free(SCSIBus *bus,
272                                       int channel, int target, int lun,
273                                       SCSIDevice **p_dev)
274  {
275      SCSIDevice *d;
276  
277      RCU_READ_LOCK_GUARD();
278      d = do_scsi_device_find(bus, channel, target, lun, true);
279      if (d && d->lun == lun) {
280          if (p_dev) {
281              *p_dev = d;
282          }
283          return false;
284      }
285      if (p_dev) {
286          *p_dev = NULL;
287      }
288      return true;
289  }
290  
291  static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
292  {
293      SCSIDevice *dev = SCSI_DEVICE(qdev);
294      SCSIBus *bus = SCSI_BUS(qbus);
295  
296      if (dev->channel > bus->info->max_channel) {
297          error_setg(errp, "bad scsi channel id: %d", dev->channel);
298          return false;
299      }
300      if (dev->id != -1 && dev->id > bus->info->max_target) {
301          error_setg(errp, "bad scsi device id: %d", dev->id);
302          return false;
303      }
304      if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
305          error_setg(errp, "bad scsi device lun: %d", dev->lun);
306          return false;
307      }
308  
309      if (dev->id != -1 && dev->lun != -1) {
310          SCSIDevice *d;
311          if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
312              error_setg(errp, "lun already used by '%s'", d->qdev.id);
313              return false;
314          }
315      }
316  
317      return true;
318  }
319  
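/*
 * Realize a SCSI device: pick a free target id and/or LUN if the user left
 * them at -1, call the device class' realize hook, and register the VM
 * state change handler that restarts queued requests on resume.
 */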
320  static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
321  {
322      SCSIDevice *dev = SCSI_DEVICE(qdev);
323      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
324      bool is_free;
325      Error *local_err = NULL;
326  
327      if (dev->id == -1) {
328          int id = -1;
329          if (dev->lun == -1) {
330              dev->lun = 0;
331          }
332          do {
333              is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
334          } while (!is_free && id < bus->info->max_target);
335          if (!is_free) {
336              error_setg(errp, "no free target");
337              return;
338          }
339          dev->id = id;
340      } else if (dev->lun == -1) {
341          int lun = -1;
342          do {
343              is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
344          } while (!is_free && lun < bus->info->max_lun);
345          if (!is_free) {
346              error_setg(errp, "no free lun");
347              return;
348          }
349          dev->lun = lun;
350      }
351  
352      QTAILQ_INIT(&dev->requests);
353      scsi_device_realize(dev, &local_err);
354      if (local_err) {
355          error_propagate(errp, local_err);
356          return;
357      }
358      dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
359              scsi_dma_restart_cb, dev);
360  }
361  
362  static void scsi_qdev_unrealize(DeviceState *qdev)
363  {
364      SCSIDevice *dev = SCSI_DEVICE(qdev);
365  
366      if (dev->vmsentry) {
367          qemu_del_vm_change_state_handler(dev->vmsentry);
368      }
369  
370      scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
371  
372      scsi_device_unrealize(dev);
373  
374      blockdev_mark_auto_del(dev->conf.blk);
375  }
376  
377  /* handle legacy '-drive if=scsi,...' cmd line args */
378  SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
379                                        int unit, bool removable, BlockConf *conf,
380                                        const char *serial, Error **errp)
381  {
382      const char *driver;
383      char *name;
384      DeviceState *dev;
385      SCSIDevice *s;
386      DriveInfo *dinfo;
387      Error *local_err = NULL;
388  
389      if (blk_is_sg(blk)) {
390          driver = "scsi-generic";
391      } else {
392          dinfo = blk_legacy_dinfo(blk);
393          if (dinfo && dinfo->media_cd) {
394              driver = "scsi-cd";
395          } else {
396              driver = "scsi-hd";
397          }
398      }
399      dev = qdev_new(driver);
400      name = g_strdup_printf("legacy[%d]", unit);
401      object_property_add_child(OBJECT(bus), name, OBJECT(dev));
402      g_free(name);
403  
404      s = SCSI_DEVICE(dev);
405      s->conf = *conf;
406  
407      check_boot_index(conf->bootindex, &local_err);
408      if (local_err) {
409          object_unparent(OBJECT(dev));
410          error_propagate(errp, local_err);
411          return NULL;
412      }
413      add_boot_device_path(conf->bootindex, dev, NULL);
414  
415      qdev_prop_set_uint32(dev, "scsi-id", unit);
416      if (object_property_find(OBJECT(dev), "removable")) {
417          qdev_prop_set_bit(dev, "removable", removable);
418      }
419      if (serial && object_property_find(OBJECT(dev), "serial")) {
420          qdev_prop_set_string(dev, "serial", serial);
421      }
422      if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
423          object_unparent(OBJECT(dev));
424          return NULL;
425      }
426  
427      if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
428          object_unparent(OBJECT(dev));
429          return NULL;
430      }
431      return s;
432  }
433  
434  void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
435  {
436      Location loc;
437      DriveInfo *dinfo;
438      int unit;
439      BlockConf conf = {
440          .bootindex = -1,
441          .share_rw = false,
442          .rerror = BLOCKDEV_ON_ERROR_AUTO,
443          .werror = BLOCKDEV_ON_ERROR_AUTO,
444      };
445  
446      loc_push_none(&loc);
447      for (unit = 0; unit <= bus->info->max_target; unit++) {
448          dinfo = drive_get(IF_SCSI, bus->busnr, unit);
449          if (dinfo == NULL) {
450              continue;
451          }
452          qemu_opts_loc_restore(dinfo->opts);
453          scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
454                                    unit, false, &conf, NULL, &error_fatal);
455      }
456      loc_pop(&loc);
457  }
458  
459  static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
460  {
461      scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
462      scsi_req_complete(req, CHECK_CONDITION);
463      return 0;
464  }
465  
466  static const struct SCSIReqOps reqops_invalid_field = {
467      .size         = sizeof(SCSIRequest),
468      .send_command = scsi_invalid_field
469  };
470  
471  /* SCSIReqOps implementation for invalid commands.  */
472  
473  static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
474  {
475      scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
476      scsi_req_complete(req, CHECK_CONDITION);
477      return 0;
478  }
479  
480  static const struct SCSIReqOps reqops_invalid_opcode = {
481      .size         = sizeof(SCSIRequest),
482      .send_command = scsi_invalid_command
483  };
484  
485  /* SCSIReqOps implementation for unit attention conditions.  */
486  
487  static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
488  {
489      SCSISense *ua = NULL;
490  
491      if (req->dev->unit_attention.key == UNIT_ATTENTION) {
492          ua = &req->dev->unit_attention;
493      } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
494          ua = &req->bus->unit_attention;
495      }
496  
497      /*
498       * Fetch the unit attention sense immediately so that another
499       * scsi_req_new does not use reqops_unit_attention.
500       */
501      if (ua) {
502          scsi_req_build_sense(req, *ua);
503          *ua = SENSE_CODE(NO_SENSE);
504      }
505  }
506  
507  static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
508  {
509      scsi_req_complete(req, CHECK_CONDITION);
510      return 0;
511  }
512  
513  static const struct SCSIReqOps reqops_unit_attention = {
514      .size         = sizeof(SCSIRequest),
515      .init_req     = scsi_fetch_unit_attention_sense,
516      .send_command = scsi_unit_attention
517  };
518  
519  /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
520     an invalid LUN.  */
521  
522  typedef struct SCSITargetReq SCSITargetReq;
523  
524  struct SCSITargetReq {
525      SCSIRequest req;
526      int len;
527      uint8_t *buf;
528      int buf_len;
529  };
530  
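/*
 * Encode @lun into the first two bytes of an 8-byte REPORT LUNS entry,
 * using the simple addressing method for LUNs below 256 and flat space
 * addressing otherwise.
 */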
531  static void store_lun(uint8_t *outbuf, int lun)
532  {
533      if (lun < 256) {
534          /* Simple logical unit addressing method */
535          outbuf[0] = 0;
536          outbuf[1] = lun;
537      } else {
538          /* Flat space addressing method */
539          outbuf[0] = 0x40 | (lun >> 8);
540          outbuf[1] = (lun & 255);
541      }
542  }
543  
544  static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
545  {
546      BusChild *kid;
547      int channel, id;
548      uint8_t tmp[8] = {0};
549      int len = 0;
550      GByteArray *buf;
551  
552      if (r->req.cmd.xfer < 16) {
553          return false;
554      }
555      if (r->req.cmd.buf[2] > 2) {
556          return false;
557      }
558  
559      /* reserve space for 63 LUNs */
560      buf = g_byte_array_sized_new(512);
561  
562      channel = r->req.dev->channel;
563      id = r->req.dev->id;
564  
565      /* add size (will be updated later to the correct value) */
566      g_byte_array_append(buf, tmp, 8);
567      len += 8;
568  
569      /* add LUN0 */
570      g_byte_array_append(buf, tmp, 8);
571      len += 8;
572  
573      WITH_RCU_READ_LOCK_GUARD() {
574          QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
575              DeviceState *qdev = kid->child;
576              SCSIDevice *dev = SCSI_DEVICE(qdev);
577  
578              if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
579                  qdev_is_realized(&dev->qdev)) {
580                  store_lun(tmp, dev->lun);
581                  g_byte_array_append(buf, tmp, 8);
582                  len += 8;
583              }
584          }
585      }
586  
587      r->buf_len = len;
588      r->buf = g_byte_array_free(buf, FALSE);
589      r->len = MIN(len, r->req.cmd.xfer & ~7);
590  
591      /* store the LUN list length */
592      stl_be_p(&r->buf[0], len - 8);
593  
594      /*
595       * If a REPORT LUNS command enters the enabled command state, [...]
596       * the device server shall clear any pending unit attention condition
597       * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
598       */
599      scsi_clear_reported_luns_changed(&r->req);
600  
601      return true;
602  }
603  
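/*
 * Build an INQUIRY response for a request addressed to a LUN that this
 * device does not implement: only VPD page 0x00 and the standard INQUIRY
 * data (reporting an inactive/unsupported peripheral) are emulated.
 */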
604  static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
605  {
606      assert(r->req.dev->lun != r->req.lun);
607  
608      scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
609  
610      if (r->req.cmd.buf[1] & 0x2) {
611          /* Command support data - optional, not implemented */
612          return false;
613      }
614  
615      if (r->req.cmd.buf[1] & 0x1) {
616          /* Vital product data */
617          uint8_t page_code = r->req.cmd.buf[2];
618          r->buf[r->len++] = page_code; /* this page */
619          r->buf[r->len++] = 0x00;
620  
621          switch (page_code) {
622          case 0x00: /* Supported page codes, mandatory */
623          {
624              int pages;
625              pages = r->len++;
626              r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
627              r->buf[pages] = r->len - pages - 1; /* number of pages */
628              break;
629          }
630          default:
631              return false;
632          }
633          /* done with EVPD */
634          assert(r->len < r->buf_len);
635          r->len = MIN(r->req.cmd.xfer, r->len);
636          return true;
637      }
638  
639      /* Standard INQUIRY data */
640      if (r->req.cmd.buf[2] != 0) {
641          return false;
642      }
643  
644      /* PAGE CODE == 0 */
645      r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
646      memset(r->buf, 0, r->len);
647      if (r->req.lun != 0) {
648          r->buf[0] = TYPE_NO_LUN;
649      } else {
650          r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
651          r->buf[2] = 5; /* Version */
652          r->buf[3] = 2 | 0x10; /* HiSup, response data format */
653          r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
654          r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
655          memcpy(&r->buf[8], "QEMU    ", 8);
656          memcpy(&r->buf[16], "QEMU TARGET     ", 16);
657          pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
658      }
659      return true;
660  }
661  
662  static size_t scsi_sense_len(SCSIRequest *req)
663  {
664      if (req->dev->type == TYPE_SCANNER) {
665          return SCSI_SENSE_LEN_SCANNER;
666      }
667      return SCSI_SENSE_LEN;
668  }
669  
670  static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
671  {
672      SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
673      int fixed_sense = (req->cmd.buf[1] & 1) == 0;
674  
675      if (req->lun != 0 &&
676          buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
677          scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
678          scsi_req_complete(req, CHECK_CONDITION);
679          return 0;
680      }
681      switch (buf[0]) {
682      case REPORT_LUNS:
683          if (!scsi_target_emulate_report_luns(r)) {
684              goto illegal_request;
685          }
686          break;
687      case INQUIRY:
688          if (!scsi_target_emulate_inquiry(r)) {
689              goto illegal_request;
690          }
691          break;
692      case REQUEST_SENSE:
693          scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
694          if (req->lun != 0) {
695              const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
696  
697              r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
698                                            sense, fixed_sense);
699          } else {
700              r->len = scsi_device_get_sense(r->req.dev, r->buf,
701                                             MIN(req->cmd.xfer, r->buf_len),
702                                             fixed_sense);
703          }
704          if (r->req.dev->sense_is_ua) {
705              scsi_device_unit_attention_reported(req->dev);
706              r->req.dev->sense_len = 0;
707              r->req.dev->sense_is_ua = false;
708          }
709          break;
710      case TEST_UNIT_READY:
711          break;
712      default:
713          scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
714          scsi_req_complete(req, CHECK_CONDITION);
715          return 0;
716      illegal_request:
717          scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
718          scsi_req_complete(req, CHECK_CONDITION);
719          return 0;
720      }
721  
722      if (!r->len) {
723          scsi_req_complete(req, GOOD);
724      }
725      return r->len;
726  }
727  
728  static void scsi_target_read_data(SCSIRequest *req)
729  {
730      SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
731      uint32_t n;
732  
733      n = r->len;
734      if (n > 0) {
735          r->len = 0;
736          scsi_req_data(&r->req, n);
737      } else {
738          scsi_req_complete(&r->req, GOOD);
739      }
740  }
741  
742  static uint8_t *scsi_target_get_buf(SCSIRequest *req)
743  {
744      SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
745  
746      return r->buf;
747  }
748  
749  static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
750  {
751      SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
752  
753      r->buf = g_malloc(len);
754      r->buf_len = len;
755  
756      return r->buf;
757  }
758  
759  static void scsi_target_free_buf(SCSIRequest *req)
760  {
761      SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
762  
763      g_free(r->buf);
764  }
765  
766  static const struct SCSIReqOps reqops_target_command = {
767      .size         = sizeof(SCSITargetReq),
768      .send_command = scsi_target_send_command,
769      .read_data    = scsi_target_read_data,
770      .get_buf      = scsi_target_get_buf,
771      .free_req     = scsi_target_free_buf,
772  };
773  
774  
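/*
 * Allocate a request that uses the given SCSIReqOps.  The part of the
 * structure after the sense buffer is zeroed, and references to the device
 * and to the HBA are taken so that both stay alive while the request is
 * in flight.
 */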
775  SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
776                              uint32_t tag, uint32_t lun, void *hba_private)
777  {
778      SCSIRequest *req;
779      SCSIBus *bus = scsi_bus_from_device(d);
780      BusState *qbus = BUS(bus);
781      const int memset_off = offsetof(SCSIRequest, sense)
782                             + sizeof(req->sense);
783  
784      req = g_malloc(reqops->size);
785      memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
786      req->refcount = 1;
787      req->bus = bus;
788      req->dev = d;
789      req->tag = tag;
790      req->lun = lun;
791      req->hba_private = hba_private;
792      req->status = -1;
793      req->host_status = -1;
794      req->ops = reqops;
795      object_ref(OBJECT(d));
796      object_ref(OBJECT(qbus->parent));
797      notifier_list_init(&req->cancel_notifiers);
798  
799      if (reqops->init_req) {
800          reqops->init_req(req);
801      }
802  
803      trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
804      return req;
805  }
806  
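/*
 * Create a request for the CDB in @buf.  Depending on the device state this
 * picks the unit attention, target (REPORT LUNS/bad LUN) or invalid-opcode
 * reqops, or asks the device class to allocate the request; the parsed
 * command is stored in req->cmd.
 */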
807  SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
808                            uint8_t *buf, size_t buf_len, void *hba_private)
809  {
810      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
811      const SCSIReqOps *ops;
812      SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
813      SCSIRequest *req;
814      SCSICommand cmd = { .len = 0 };
815      int ret;
816  
817      if (buf_len == 0) {
818          trace_scsi_req_parse_bad(d->id, lun, tag, 0);
819          goto invalid_opcode;
820      }
821  
822      if ((d->unit_attention.key == UNIT_ATTENTION ||
823           bus->unit_attention.key == UNIT_ATTENTION) &&
824          (buf[0] != INQUIRY &&
825           buf[0] != REPORT_LUNS &&
826           buf[0] != GET_CONFIGURATION &&
827           buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
828  
829           /*
830            * If we already have a pending unit attention condition,
831            * report this one before triggering another one.
832            */
833           !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
834          ops = &reqops_unit_attention;
835      } else if (lun != d->lun ||
836                 buf[0] == REPORT_LUNS ||
837                 (buf[0] == REQUEST_SENSE && d->sense_len)) {
838          ops = &reqops_target_command;
839      } else {
840          ops = NULL;
841      }
842  
843      if (ops != NULL || !sc->parse_cdb) {
844          ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
845      } else {
846          ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
847      }
848  
849      if (ret != 0) {
850          trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
851  invalid_opcode:
852          req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
853      } else {
854          assert(cmd.len != 0);
855          trace_scsi_req_parsed(d->id, lun, tag, buf[0],
856                                cmd.mode, cmd.xfer);
857          if (cmd.lba != -1) {
858              trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
859                                        cmd.lba);
860          }
861  
862          if (cmd.xfer > INT32_MAX) {
863              req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
864          } else if (ops) {
865              req = scsi_req_alloc(ops, d, tag, lun, hba_private);
866          } else {
867              req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
868          }
869      }
870  
871      req->cmd = cmd;
872      req->residual = req->cmd.xfer;
873  
874      switch (buf[0]) {
875      case INQUIRY:
876          trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
877          break;
878      case TEST_UNIT_READY:
879          trace_scsi_test_unit_ready(d->id, lun, tag);
880          break;
881      case REPORT_LUNS:
882          trace_scsi_report_luns(d->id, lun, tag);
883          break;
884      case REQUEST_SENSE:
885          trace_scsi_request_sense(d->id, lun, tag);
886          break;
887      default:
888          break;
889      }
890  
891      return req;
892  }
893  
894  uint8_t *scsi_req_get_buf(SCSIRequest *req)
895  {
896      return req->ops->get_buf(req);
897  }
898  
899  static void scsi_clear_reported_luns_changed(SCSIRequest *req)
900  {
901      SCSISense *ua;
902  
903      if (req->dev->unit_attention.key == UNIT_ATTENTION) {
904          ua = &req->dev->unit_attention;
905      } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
906          ua = &req->bus->unit_attention;
907      } else {
908          return;
909      }
910  
911      if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
912          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
913          *ua = SENSE_CODE(NO_SENSE);
914      }
915  }
916  
917  int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
918  {
919      int ret;
920  
921      assert(len >= 14);
922      if (!req->sense_len) {
923          return 0;
924      }
925  
926      ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
927  
928      /*
929       * FIXME: clearing unit attention conditions upon autosense should be done
930       * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
931       * (SAM-5, 5.14).
932       *
933       * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
934       * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
935       * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
936       */
937      if (req->dev->sense_is_ua) {
938          scsi_device_unit_attention_reported(req->dev);
939          req->dev->sense_len = 0;
940          req->dev->sense_is_ua = false;
941      }
942      return ret;
943  }
944  
945  int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
946  {
947      return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
948  }
949  
950  void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
951  {
952      trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
953                                 sense.key, sense.asc, sense.ascq);
954      req->sense_len = scsi_build_sense(req->sense, sense);
955  }
956  
957  static void scsi_req_enqueue_internal(SCSIRequest *req)
958  {
959      assert(!req->enqueued);
960      scsi_req_ref(req);
961      if (req->bus->info->get_sg_list) {
962          req->sg = req->bus->info->get_sg_list(req);
963      } else {
964          req->sg = NULL;
965      }
966      req->enqueued = true;
967      QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
968  }
969  
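/*
 * Add @req to its device's request queue and start it by calling the
 * request ops' send_command callback; the callback's return value is
 * passed back to the HBA.
 */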
970  int32_t scsi_req_enqueue(SCSIRequest *req)
971  {
972      int32_t rc;
973  
974      assert(!req->retry);
975      scsi_req_enqueue_internal(req);
976      scsi_req_ref(req);
977      rc = req->ops->send_command(req, req->cmd.buf);
978      scsi_req_unref(req);
979      return rc;
980  }
981  
982  static void scsi_req_dequeue(SCSIRequest *req)
983  {
984      trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
985      req->retry = false;
986      if (req->enqueued) {
987          QTAILQ_REMOVE(&req->dev->requests, req, next);
988          req->enqueued = false;
989          scsi_req_unref(req);
990      }
991  }
992  
993  static int scsi_get_performance_length(int num_desc, int type, int data_type)
994  {
995      /* MMC-6, paragraph 6.7.  */
996      switch (type) {
997      case 0:
998          if ((data_type & 3) == 0) {
999              /* Each descriptor is as in Table 295 - Nominal performance.  */
1000              return 16 * num_desc + 8;
1001          } else {
1002              /* Each descriptor is as in Table 296 - Exceptions.  */
1003              return 6 * num_desc + 8;
1004          }
1005      case 1:
1006      case 4:
1007      case 5:
1008          return 8 * num_desc + 8;
1009      case 2:
1010          return 2048 * num_desc + 8;
1011      case 3:
1012          return 16 * num_desc + 8;
1013      default:
1014          return 8;
1015      }
1016  }
1017  
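/*
 * Return the size in bytes of one unit of the T_LENGTH count for
 * ATA PASS-THROUGH: 1 when the count is in bytes, otherwise 512 or the
 * device block size depending on the T_TYPE bit.
 */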
1018  static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
1019  {
1020      int byte_block = (buf[2] >> 2) & 0x1;
1021      int type = (buf[2] >> 4) & 0x1;
1022      int xfer_unit;
1023  
1024      if (byte_block) {
1025          if (type) {
1026              xfer_unit = dev->blocksize;
1027          } else {
1028              xfer_unit = 512;
1029          }
1030      } else {
1031          xfer_unit = 1;
1032      }
1033  
1034      return xfer_unit;
1035  }
1036  
1037  static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
1038  {
1039      int length = buf[2] & 0x3;
1040      int xfer;
1041      int unit = ata_passthrough_xfer_unit(dev, buf);
1042  
1043      switch (length) {
1044      case 0:
1045      case 3: /* USB-specific.  */
1046      default:
1047          xfer = 0;
1048          break;
1049      case 1:
1050          xfer = buf[3];
1051          break;
1052      case 2:
1053          xfer = buf[4];
1054          break;
1055      }
1056  
1057      return xfer * unit;
1058  }
1059  
1060  static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
1061  {
1062      int extend = buf[1] & 0x1;
1063      int length = buf[2] & 0x3;
1064      int xfer;
1065      int unit = ata_passthrough_xfer_unit(dev, buf);
1066  
1067      switch (length) {
1068      case 0:
1069      case 3: /* USB-specific.  */
1070      default:
1071          xfer = 0;
1072          break;
1073      case 1:
1074          xfer = buf[4];
1075          xfer |= (extend ? buf[3] << 8 : 0);
1076          break;
1077      case 2:
1078          xfer = buf[6];
1079          xfer |= (extend ? buf[5] << 8 : 0);
1080          break;
1081      }
1082  
1083      return xfer * unit;
1084  }
1085  
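/*
 * Default computation of the expected data transfer length (cmd->xfer, in
 * bytes) from the CDB; opcodes without a data phase get cmd->xfer = 0.
 */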
1086  static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1087  {
1088      cmd->xfer = scsi_cdb_xfer(buf);
1089      switch (buf[0]) {
1090      case TEST_UNIT_READY:
1091      case REWIND:
1092      case START_STOP:
1093      case SET_CAPACITY:
1094      case WRITE_FILEMARKS:
1095      case WRITE_FILEMARKS_16:
1096      case SPACE:
1097      case RESERVE:
1098      case RELEASE:
1099      case ERASE:
1100      case ALLOW_MEDIUM_REMOVAL:
1101      case SEEK_10:
1102      case SYNCHRONIZE_CACHE:
1103      case SYNCHRONIZE_CACHE_16:
1104      case LOCATE_16:
1105      case LOCK_UNLOCK_CACHE:
1106      case SET_CD_SPEED:
1107      case SET_LIMITS:
1108      case WRITE_LONG_10:
1109      case UPDATE_BLOCK:
1110      case RESERVE_TRACK:
1111      case SET_READ_AHEAD:
1112      case PRE_FETCH:
1113      case PRE_FETCH_16:
1114      case ALLOW_OVERWRITE:
1115          cmd->xfer = 0;
1116          break;
1117      case VERIFY_10:
1118      case VERIFY_12:
1119      case VERIFY_16:
1120          if ((buf[1] & 2) == 0) {
1121              cmd->xfer = 0;
1122          } else if ((buf[1] & 4) != 0) {
1123              cmd->xfer = 1;
1124          }
1125          cmd->xfer *= dev->blocksize;
1126          break;
1127      case MODE_SENSE:
1128          break;
1129      case WRITE_SAME_10:
1130      case WRITE_SAME_16:
1131          cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
1132          break;
1133      case READ_CAPACITY_10:
1134          cmd->xfer = 8;
1135          break;
1136      case READ_BLOCK_LIMITS:
1137          cmd->xfer = 6;
1138          break;
1139      case SEND_VOLUME_TAG:
1140          /* GPCMD_SET_STREAMING from multimedia commands.  */
1141          if (dev->type == TYPE_ROM) {
1142              cmd->xfer = buf[10] | (buf[9] << 8);
1143          } else {
1144              cmd->xfer = buf[9] | (buf[8] << 8);
1145          }
1146          break;
1147      case WRITE_6:
1148          /* length 0 means 256 blocks */
1149          if (cmd->xfer == 0) {
1150              cmd->xfer = 256;
1151          }
1152          /* fall through */
1153      case WRITE_10:
1154      case WRITE_VERIFY_10:
1155      case WRITE_12:
1156      case WRITE_VERIFY_12:
1157      case WRITE_16:
1158      case WRITE_VERIFY_16:
1159          cmd->xfer *= dev->blocksize;
1160          break;
1161      case READ_6:
1162      case READ_REVERSE:
1163          /* length 0 means 256 blocks */
1164          if (cmd->xfer == 0) {
1165              cmd->xfer = 256;
1166          }
1167          /* fall through */
1168      case READ_10:
1169      case READ_12:
1170      case READ_16:
1171          cmd->xfer *= dev->blocksize;
1172          break;
1173      case FORMAT_UNIT:
1174          /* MMC mandates the parameter list to be 12 bytes long.  Parameters
1175           * for block devices are restricted to the header right now.  */
1176          if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1177              cmd->xfer = 12;
1178          } else {
1179              cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1180          }
1181          break;
1182      case INQUIRY:
1183      case RECEIVE_DIAGNOSTIC:
1184      case SEND_DIAGNOSTIC:
1185          cmd->xfer = buf[4] | (buf[3] << 8);
1186          break;
1187      case READ_CD:
1188      case READ_BUFFER:
1189      case WRITE_BUFFER:
1190      case SEND_CUE_SHEET:
1191          cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1192          break;
1193      case PERSISTENT_RESERVE_OUT:
1194          cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1195          break;
1196      case ERASE_12:
1197          if (dev->type == TYPE_ROM) {
1198              /* MMC command GET PERFORMANCE.  */
1199              cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1200                                                      buf[10], buf[1] & 0x1f);
1201          }
1202          break;
1203      case MECHANISM_STATUS:
1204      case READ_DVD_STRUCTURE:
1205      case SEND_DVD_STRUCTURE:
1206      case MAINTENANCE_OUT:
1207      case MAINTENANCE_IN:
1208          if (dev->type == TYPE_ROM) {
1209              /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1210              cmd->xfer = buf[9] | (buf[8] << 8);
1211          }
1212          break;
1213      case ATA_PASSTHROUGH_12:
1214          if (dev->type == TYPE_ROM) {
1215              /* BLANK command of MMC */
1216              cmd->xfer = 0;
1217          } else {
1218              cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1219          }
1220          break;
1221      case ATA_PASSTHROUGH_16:
1222          cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1223          break;
1224      }
1225      return 0;
1226  }
1227  
1228  static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1229  {
1230      switch (buf[0]) {
1231      /* stream commands */
1232      case ERASE_12:
1233      case ERASE_16:
1234          cmd->xfer = 0;
1235          break;
1236      case READ_6:
1237      case READ_REVERSE:
1238      case RECOVER_BUFFERED_DATA:
1239      case WRITE_6:
1240          cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1241          if (buf[1] & 0x01) { /* fixed */
1242              cmd->xfer *= dev->blocksize;
1243          }
1244          break;
1245      case READ_16:
1246      case READ_REVERSE_16:
1247      case VERIFY_16:
1248      case WRITE_16:
1249          cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1250          if (buf[1] & 0x01) { /* fixed */
1251              cmd->xfer *= dev->blocksize;
1252          }
1253          break;
1254      case REWIND:
1255      case LOAD_UNLOAD:
1256          cmd->xfer = 0;
1257          break;
1258      case SPACE_16:
1259          cmd->xfer = buf[13] | (buf[12] << 8);
1260          break;
1261      case READ_POSITION:
1262          switch (buf[1] & 0x1f) /* service action */ {
1263          case SHORT_FORM_BLOCK_ID:
1264          case SHORT_FORM_VENDOR_SPECIFIC:
1265              cmd->xfer = 20;
1266              break;
1267          case LONG_FORM:
1268              cmd->xfer = 32;
1269              break;
1270          case EXTENDED_FORM:
1271              cmd->xfer = buf[8] | (buf[7] << 8);
1272              break;
1273          default:
1274              return -1;
1275          }
1276  
1277          break;
1278      case FORMAT_UNIT:
1279          cmd->xfer = buf[4] | (buf[3] << 8);
1280          break;
1281      /* generic commands */
1282      default:
1283          return scsi_req_xfer(cmd, dev, buf);
1284      }
1285      return 0;
1286  }
1287  
1288  static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1289  {
1290      switch (buf[0]) {
1291      /* medium changer commands */
1292      case EXCHANGE_MEDIUM:
1293      case INITIALIZE_ELEMENT_STATUS:
1294      case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1295      case MOVE_MEDIUM:
1296      case POSITION_TO_ELEMENT:
1297          cmd->xfer = 0;
1298          break;
1299      case READ_ELEMENT_STATUS:
1300          cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1301          break;
1302  
1303      /* generic commands */
1304      default:
1305          return scsi_req_xfer(cmd, dev, buf);
1306      }
1307      return 0;
1308  }
1309  
1310  static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1311  {
1312      switch (buf[0]) {
1313      /* Scanner commands */
1314      case OBJECT_POSITION:
1315          cmd->xfer = 0;
1316          break;
1317      case SCAN:
1318          cmd->xfer = buf[4];
1319          break;
1320      case READ_10:
1321      case SEND:
1322      case GET_WINDOW:
1323      case SET_WINDOW:
1324          cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1325          break;
1326      default:
1327          /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1328          return scsi_req_xfer(cmd, dev, buf);
1329      }
1330  
1331      return 0;
1332  }
1333  
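/*
 * Derive the data transfer direction for the command: no transfer when
 * cmd->xfer is zero, host-to-device for write-like opcodes, device-to-host
 * otherwise (ATA PASS-THROUGH uses the T_DIR bit of the CDB).
 */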
1334  static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1335  {
1336      if (!cmd->xfer) {
1337          cmd->mode = SCSI_XFER_NONE;
1338          return;
1339      }
1340      switch (cmd->buf[0]) {
1341      case WRITE_6:
1342      case WRITE_10:
1343      case WRITE_VERIFY_10:
1344      case WRITE_12:
1345      case WRITE_VERIFY_12:
1346      case WRITE_16:
1347      case WRITE_VERIFY_16:
1348      case VERIFY_10:
1349      case VERIFY_12:
1350      case VERIFY_16:
1351      case COPY:
1352      case COPY_VERIFY:
1353      case COMPARE:
1354      case CHANGE_DEFINITION:
1355      case LOG_SELECT:
1356      case MODE_SELECT:
1357      case MODE_SELECT_10:
1358      case SEND_DIAGNOSTIC:
1359      case WRITE_BUFFER:
1360      case FORMAT_UNIT:
1361      case REASSIGN_BLOCKS:
1362      case SEARCH_EQUAL:
1363      case SEARCH_HIGH:
1364      case SEARCH_LOW:
1365      case UPDATE_BLOCK:
1366      case WRITE_LONG_10:
1367      case WRITE_SAME_10:
1368      case WRITE_SAME_16:
1369      case UNMAP:
1370      case SEARCH_HIGH_12:
1371      case SEARCH_EQUAL_12:
1372      case SEARCH_LOW_12:
1373      case MEDIUM_SCAN:
1374      case SEND_VOLUME_TAG:
1375      case SEND_CUE_SHEET:
1376      case SEND_DVD_STRUCTURE:
1377      case PERSISTENT_RESERVE_OUT:
1378      case MAINTENANCE_OUT:
1379      case SET_WINDOW:
1380      case SCAN:
1381          /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
1382           * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1383           */
1384          cmd->mode = SCSI_XFER_TO_DEV;
1385          break;
1386      case ATA_PASSTHROUGH_12:
1387      case ATA_PASSTHROUGH_16:
1388          /* T_DIR */
1389          cmd->mode = (cmd->buf[2] & 0x8) ?
1390                     SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1391          break;
1392      default:
1393          cmd->mode = SCSI_XFER_FROM_DEV;
1394          break;
1395      }
1396  }
1397  
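/*
 * Common CDB parser: validate the CDB length, compute the expected transfer
 * length according to the device type (stream, medium changer, scanner or
 * generic), then derive the transfer direction and the LBA.  Returns 0 on
 * success, -1 if the CDB cannot be parsed.
 */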
1398  int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
1399                         size_t buf_len)
1400  {
1401      int rc;
1402      int len;
1403  
1404      cmd->lba = -1;
1405      len = scsi_cdb_length(buf);
1406      if (len < 0 || len > buf_len) {
1407          return -1;
1408      }
1409  
1410      cmd->len = len;
1411      switch (dev->type) {
1412      case TYPE_TAPE:
1413          rc = scsi_req_stream_xfer(cmd, dev, buf);
1414          break;
1415      case TYPE_MEDIUM_CHANGER:
1416          rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1417          break;
1418      case TYPE_SCANNER:
1419          rc = scsi_req_scanner_length(cmd, dev, buf);
1420          break;
1421      default:
1422          rc = scsi_req_xfer(cmd, dev, buf);
1423          break;
1424      }
1425  
1426      if (rc != 0)
1427          return rc;
1428  
1429      memcpy(cmd->buf, buf, cmd->len);
1430      scsi_cmd_xfer_mode(cmd);
1431      cmd->lba = scsi_cmd_lba(cmd);
1432      return 0;
1433  }
1434  
1435  void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1436  {
1437      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1438  
1439      scsi_device_set_ua(dev, sense);
1440      if (bus->info->change) {
1441          bus->info->change(bus, dev, sense);
1442      }
1443  }
1444  
1445  SCSIRequest *scsi_req_ref(SCSIRequest *req)
1446  {
1447      assert(req->refcount > 0);
1448      req->refcount++;
1449      return req;
1450  }
1451  
1452  void scsi_req_unref(SCSIRequest *req)
1453  {
1454      assert(req->refcount > 0);
1455      if (--req->refcount == 0) {
1456          BusState *qbus = req->dev->qdev.parent_bus;
1457          SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1458  
1459          if (bus->info->free_request && req->hba_private) {
1460              bus->info->free_request(bus, req->hba_private);
1461          }
1462          if (req->ops->free_req) {
1463              req->ops->free_req(req);
1464          }
1465          object_unref(OBJECT(req->dev));
1466          object_unref(OBJECT(qbus->parent));
1467          g_free(req);
1468      }
1469  }
1470  
1471  /* Tell the device that we finished processing this chunk of I/O.  It
1472     will start the next chunk or complete the command.  */
1473  void scsi_req_continue(SCSIRequest *req)
1474  {
1475      if (req->io_canceled) {
1476          trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1477          return;
1478      }
1479      trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1480      if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1481          req->ops->write_data(req);
1482      } else {
1483          req->ops->read_data(req);
1484      }
1485  }
1486  
1487  /* Called by the devices when data is ready for the HBA.  The HBA should
1488     start a DMA operation to read or fill the device's data buffer.
1489     Once it completes, calling scsi_req_continue will restart I/O.  */
1490  void scsi_req_data(SCSIRequest *req, int len)
1491  {
1492      uint8_t *buf;
1493      if (req->io_canceled) {
1494          trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1495          return;
1496      }
1497      trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1498      assert(req->cmd.mode != SCSI_XFER_NONE);
1499      if (!req->sg) {
1500          req->residual -= len;
1501          req->bus->info->transfer_data(req, len);
1502          return;
1503      }
1504  
1505      /* If the device calls scsi_req_data and the HBA specified a
1506       * scatter/gather list, the transfer has to happen in a single
1507       * step.  */
1508      assert(!req->dma_started);
1509      req->dma_started = true;
1510  
1511      buf = scsi_req_get_buf(req);
1512      if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1513          dma_buf_read(buf, len, &req->residual, req->sg,
1514                       MEMTXATTRS_UNSPECIFIED);
1515      } else {
1516          dma_buf_write(buf, len, &req->residual, req->sg,
1517                        MEMTXATTRS_UNSPECIFIED);
1518      }
1519      scsi_req_continue(req);
1520  }
1521  
1522  void scsi_req_print(SCSIRequest *req)
1523  {
1524      FILE *fp = stderr;
1525      int i;
1526  
1527      fprintf(fp, "[%s id=%d] %s",
1528              req->dev->qdev.parent_bus->name,
1529              req->dev->id,
1530              scsi_command_name(req->cmd.buf[0]));
1531      for (i = 1; i < req->cmd.len; i++) {
1532          fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1533      }
1534      switch (req->cmd.mode) {
1535      case SCSI_XFER_NONE:
1536          fprintf(fp, " - none\n");
1537          break;
1538      case SCSI_XFER_FROM_DEV:
1539          fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1540          break;
1541      case SCSI_XFER_TO_DEV:
1542          fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1543          break;
1544      default:
1545          fprintf(fp, " - Oops\n");
1546          break;
1547      }
1548  }
1549  
1550  void scsi_req_complete_failed(SCSIRequest *req, int host_status)
1551  {
1552      SCSISense sense;
1553      int status;
1554  
1555      assert(req->status == -1 && req->host_status == -1);
1556      assert(req->ops != &reqops_unit_attention);
1557  
1558      if (!req->bus->info->fail) {
1559          status = scsi_sense_from_host_status(req->host_status, &sense);
1560          if (status == CHECK_CONDITION) {
1561              scsi_req_build_sense(req, sense);
1562          }
1563          scsi_req_complete(req, status);
1564          return;
1565      }
1566  
1567      req->host_status = host_status;
1568      scsi_req_ref(req);
1569      scsi_req_dequeue(req);
1570      req->bus->info->fail(req);
1571  
1572      /* Cancelled requests might end up being completed instead of cancelled */
1573      notifier_list_notify(&req->cancel_notifiers, req);
1574      scsi_req_unref(req);
1575  }
1576  
1577  void scsi_req_complete(SCSIRequest *req, int status)
1578  {
1579      assert(req->status == -1 && req->host_status == -1);
1580      req->status = status;
1581      req->host_status = SCSI_HOST_OK;
1582  
1583      assert(req->sense_len <= sizeof(req->sense));
1584      if (status == GOOD) {
1585          req->sense_len = 0;
1586      }
1587  
1588      if (req->sense_len) {
1589          memcpy(req->dev->sense, req->sense, req->sense_len);
1590          req->dev->sense_len = req->sense_len;
1591          req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1592      } else {
1593          req->dev->sense_len = 0;
1594          req->dev->sense_is_ua = false;
1595      }
1596  
1597      scsi_req_ref(req);
1598      scsi_req_dequeue(req);
1599      req->bus->info->complete(req, req->residual);
1600  
1601      /* Cancelled requests might end up being completed instead of cancelled */
1602      notifier_list_notify(&req->cancel_notifiers, req);
1603      scsi_req_unref(req);
1604  }
1605  
1606  /* Called by the devices when the request is canceled. */
1607  void scsi_req_cancel_complete(SCSIRequest *req)
1608  {
1609      assert(req->io_canceled);
1610      if (req->bus->info->cancel) {
1611          req->bus->info->cancel(req);
1612      }
1613      notifier_list_notify(&req->cancel_notifiers, req);
1614      scsi_req_unref(req);
1615  }
1616  
1617  /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1618   * notifier list; the bus will be notified when the request's cancellation
1619   * is completed.
1620   */
1621  void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1622  {
1623      trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1624      if (notifier) {
1625          notifier_list_add(&req->cancel_notifiers, notifier);
1626      }
1627      if (req->io_canceled) {
1628          /* A blk_aio_cancel_async is pending; when it finishes,
1629           * scsi_req_cancel_complete will be called and will
1630           * call the notifier we just added.  Just wait for that.
1631           */
1632          assert(req->aiocb);
1633          return;
1634      }
1635      /* Dropped in scsi_req_cancel_complete.  */
1636      scsi_req_ref(req);
1637      scsi_req_dequeue(req);
1638      req->io_canceled = true;
1639      if (req->aiocb) {
1640          blk_aio_cancel_async(req->aiocb);
1641      } else {
1642          scsi_req_cancel_complete(req);
1643      }
1644  }
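
/*
 * Illustrative sketch (not part of this file): an HBA typically embeds a
 * Notifier in its per-request state so that it learns when an asynchronous
 * cancellation has actually finished.  ExampleHBAReq and the example_*
 * helpers are hypothetical, not existing QEMU types.
 */
typedef struct ExampleHBAReq {
    SCSIRequest *sreq;
    Notifier cancel_notifier;
} ExampleHBAReq;

static void example_hba_cancel_notify(Notifier *notifier, void *data)
{
    ExampleHBAReq *r = container_of(notifier, ExampleHBAReq, cancel_notifier);

    /* @data is the cancelled SCSIRequest; the HBA state can go away now */
    g_free(r);
}

static void example_hba_cancel(ExampleHBAReq *r)
{
    r->cancel_notifier.notify = example_hba_cancel_notify;
    scsi_req_cancel_async(r->sreq, &r->cancel_notifier);
}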
1645  
1646  void scsi_req_cancel(SCSIRequest *req)
1647  {
1648      trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1649      if (!req->enqueued) {
1650          return;
1651      }
1652      assert(!req->io_canceled);
1653      /* Dropped in scsi_req_cancel_complete.  */
1654      scsi_req_ref(req);
1655      scsi_req_dequeue(req);
1656      req->io_canceled = true;
1657      if (req->aiocb) {
1658          blk_aio_cancel(req->aiocb);
1659      } else {
1660          scsi_req_cancel_complete(req);
1661      }
1662  }
1663  
1664  static int scsi_ua_precedence(SCSISense sense)
1665  {
1666      if (sense.key != UNIT_ATTENTION) {
1667          return INT_MAX;
1668      }
1669      if (sense.asc == 0x29 && sense.ascq == 0x04) {
1670          /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1671          return 1;
1672      } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1673          /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1674          return 2;
1675      } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1676          /* These two go with "all others". */
1677          ;
1678      } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1679          /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1680           * POWER ON OCCURRED = 1
1681           * SCSI BUS RESET OCCURRED = 2
1682           * BUS DEVICE RESET FUNCTION OCCURRED = 3
1683           * I_T NEXUS LOSS OCCURRED = 7
1684           */
1685          return sense.ascq;
1686      } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1687          /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
1688          return 8;
1689      }
1690      return (sense.asc << 8) | sense.ascq;
1691  }
1692  
1693  void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
1694  {
1695      int prec1, prec2;
1696      if (sense.key != UNIT_ATTENTION) {
1697          return;
1698      }
1699  
1700      /*
1701       * Override a pre-existing unit attention condition, except for a more
1702       * important reset condition.
1703       */
1704      prec1 = scsi_ua_precedence(bus->unit_attention);
1705      prec2 = scsi_ua_precedence(sense);
1706      if (prec2 < prec1) {
1707          bus->unit_attention = sense;
1708      }
1709  }
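
/*
 * Illustrative example (not part of this file): if a bus reset already queued
 * SENSE_CODE(RESET) (asc 0x29/0x00, precedence 0), a later
 * scsi_bus_set_ua(bus, SENSE_CODE(REPORTED_LUNS_CHANGED)) is a no-op,
 * because that condition (asc 0x3F/0x0E) ranks far lower; the reset is
 * reported to the guest first.
 */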
1710  
1711  void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1712  {
1713      int prec1, prec2;
1714      if (sense.key != UNIT_ATTENTION) {
1715          return;
1716      }
1717      trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1718                               sense.asc, sense.ascq);
1719  
1720      /*
1721       * Override a pre-existing unit attention condition, except for a more
1722       * important reset condition.
1723       */
1724      prec1 = scsi_ua_precedence(sdev->unit_attention);
1725      prec2 = scsi_ua_precedence(sense);
1726      if (prec2 < prec1) {
1727          sdev->unit_attention = sense;
1728      }
1729  }
1730  
1731  static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
1732  {
1733      scsi_req_cancel_async(req, NULL);
1734  }
1735  
1736  /**
1737   * Cancel all requests, and block until they are deleted.
1738   */
1739  void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1740  {
1741      scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
1742  
1743      /*
1744       * Wait for all the scsi_device_purge_one_req() calls scheduled by
1745       * scsi_device_for_each_req_async(), and for the I/O requests that were
1746       * cancelled this way but may still take a bit of time to settle.
1747       */
1748      blk_drain(sdev->conf.blk);
1749  
1750      scsi_device_set_ua(sdev, sense);
1751  }
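
/*
 * Illustrative example (not part of this file): device models call this from
 * their reset path so that in-flight I/O is cancelled and the guest later
 * sees a unit attention, e.g. (with @s being the model's state that embeds a
 * SCSIDevice named qdev):
 *
 *     scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
 */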
1752  
1753  void scsi_device_drained_begin(SCSIDevice *sdev)
1754  {
1755      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1756      if (!bus) {
1757          return;
1758      }
1759  
1760      assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1761      assert(bus->drain_count < INT_MAX);
1762  
1763      /*
1764       * Multiple BlockBackends can be on a SCSIBus and each may begin/end
1765       * draining at any time. Keep a counter so HBAs only see begin/end once.
1766       */
1767      if (bus->drain_count++ == 0) {
1768          trace_scsi_bus_drained_begin(bus, sdev);
1769          if (bus->info->drained_begin) {
1770              bus->info->drained_begin(bus);
1771          }
1772      }
1773  }
1774  
1775  void scsi_device_drained_end(SCSIDevice *sdev)
1776  {
1777      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1778      if (!bus) {
1779          return;
1780      }
1781  
1782      assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1783      assert(bus->drain_count > 0);
1784  
1785      if (bus->drain_count-- == 1) {
1786          trace_scsi_bus_drained_end(bus, sdev);
1787          if (bus->info->drained_end) {
1788              bus->info->drained_end(bus);
1789          }
1790      }
1791  }
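
/*
 * Illustrative sketch (not part of this file): an HBA that wants to stop
 * fetching new requests from the guest while a backend is drained wires
 * these hooks into its SCSIBusInfo; thanks to bus->drain_count they fire
 * exactly once per begin/end no matter how many devices drain concurrently.
 * The example_hba_*_queues() helpers are hypothetical.
 */
static void example_hba_drained_begin(SCSIBus *bus)
{
    example_hba_stop_queues(bus);    /* pause the guest-facing queues */
}

static void example_hba_drained_end(SCSIBus *bus)
{
    example_hba_start_queues(bus);   /* resume processing guest requests */
}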
1792  
1793  static char *scsibus_get_dev_path(DeviceState *dev)
1794  {
1795      SCSIDevice *d = SCSI_DEVICE(dev);
1796      DeviceState *hba = dev->parent_bus->parent;
1797      char *id;
1798      char *path;
1799  
1800      id = qdev_get_dev_path(hba);
1801      if (id) {
1802          path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1803      } else {
1804          path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1805      }
1806      g_free(id);
1807      return path;
1808  }
1809  
1810  static char *scsibus_get_fw_dev_path(DeviceState *dev)
1811  {
1812      SCSIDevice *d = SCSI_DEVICE(dev);
1813      return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1814                             qdev_fw_name(dev), d->id, d->lun);
1815  }
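
/*
 * Illustrative example (not part of this file): for a device at channel 0,
 * id 1, lun 2 these produce something like "<hba dev path>/0:1:2" and
 * "channel@0/disk@1,2" respectively, the latter assuming a device whose
 * qdev_fw_name() is "disk"; note that id and lun appear in hex in the
 * firmware path.
 */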
1816  
1817  /* SCSI request list.  For simplicity, pv points to the whole device */
1818  
1819  static void put_scsi_req(SCSIRequest *req, void *opaque)
1820  {
1821      QEMUFile *f = opaque;
1822  
1823      assert(!req->io_canceled);
1824      assert(req->status == -1 && req->host_status == -1);
1825      assert(req->enqueued);
1826  
1827      qemu_put_sbyte(f, req->retry ? 1 : 2);
1828      qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1829      qemu_put_be32s(f, &req->tag);
1830      qemu_put_be32s(f, &req->lun);
1831      if (req->bus->info->save_request) {
1832          req->bus->info->save_request(f, req);
1833      }
1834      if (req->ops->save_request) {
1835          req->ops->save_request(f, req);
1836      }
1837  }
1838  
1839  static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1840                               const VMStateField *field, JSONWriter *vmdesc)
1841  {
1842      SCSIDevice *s = pv;
1843  
1844      scsi_device_for_each_req_sync(s, put_scsi_req, f);
1845      qemu_put_sbyte(f, 0);
1846      return 0;
1847  }
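
/*
 * Illustrative summary (not part of this file) of the stream layout written
 * above and parsed by get_scsi_requests() below, for each in-flight request:
 *
 *     sbyte   marker            1 = retry, 2 = no retry, 0 = end of list
 *     bytes   cmd.buf           SCSI_CMD_BUF_SIZE bytes of CDB
 *     be32    tag
 *     be32    lun
 *     ...     HBA payload       optional, SCSIBusInfo::save_request
 *     ...     request payload   optional, SCSIReqOps::save_request
 */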
1848  
1849  static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1850                               const VMStateField *field)
1851  {
1852      SCSIDevice *s = pv;
1853      SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1854      int8_t sbyte;
1855  
1856      while ((sbyte = qemu_get_sbyte(f)) > 0) {
1857          uint8_t buf[SCSI_CMD_BUF_SIZE];
1858          uint32_t tag;
1859          uint32_t lun;
1860          SCSIRequest *req;
1861  
1862          qemu_get_buffer(f, buf, sizeof(buf));
1863          qemu_get_be32s(f, &tag);
1864          qemu_get_be32s(f, &lun);
1865          /*
1866           * A too-short CDB would have been rejected by scsi_req_new, so just use
1867           * SCSI_CMD_BUF_SIZE as the CDB length.
1868           */
1869          req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
1870          req->retry = (sbyte == 1);
1871          if (bus->info->load_request) {
1872              req->hba_private = bus->info->load_request(f, req);
1873          }
1874          if (req->ops->load_request) {
1875              req->ops->load_request(f, req);
1876          }
1877  
1878          /* Just restart it later.  */
1879          scsi_req_enqueue_internal(req);
1880  
1881          /* At this point, the request will be kept alive by the reference
1882           * added by scsi_req_enqueue_internal, so we can release our reference.
1883           * The HBA of course will add its own reference in the load_request
1884           * callback if it needs to hold on to the SCSIRequest.
1885           */
1886          scsi_req_unref(req);
1887      }
1888  
1889      return 0;
1890  }
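
/*
 * Illustrative sketch (not part of this file): an HBA that keeps per-request
 * state can persist it through the save_request/load_request hooks used
 * above.  ExampleHBAMigReq, its queue_index field and the scsi_req_ref()
 * ownership choice are assumptions for the sketch, not an existing API
 * contract.
 */
typedef struct ExampleHBAMigReq {
    SCSIRequest *sreq;
    uint32_t queue_index;
} ExampleHBAMigReq;

static void example_hba_save_request(QEMUFile *f, SCSIRequest *req)
{
    ExampleHBAMigReq *r = req->hba_private;

    qemu_put_be32(f, r->queue_index);
}

static void *example_hba_load_request(QEMUFile *f, SCSIRequest *req)
{
    ExampleHBAMigReq *r = g_new0(ExampleHBAMigReq, 1);

    r->queue_index = qemu_get_be32(f);
    r->sreq = scsi_req_ref(req);     /* keep the request alive for the HBA */
    return r;
}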
1891  
1892  static const VMStateInfo vmstate_info_scsi_requests = {
1893      .name = "scsi-requests",
1894      .get  = get_scsi_requests,
1895      .put  = put_scsi_requests,
1896  };
1897  
1898  static bool scsi_sense_state_needed(void *opaque)
1899  {
1900      SCSIDevice *s = opaque;
1901  
1902      return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1903  }
1904  
1905  static const VMStateDescription vmstate_scsi_sense_state = {
1906      .name = "SCSIDevice/sense",
1907      .version_id = 1,
1908      .minimum_version_id = 1,
1909      .needed = scsi_sense_state_needed,
1910      .fields = (const VMStateField[]) {
1911          VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1912                                  SCSI_SENSE_BUF_SIZE_OLD,
1913                                  SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1914          VMSTATE_END_OF_LIST()
1915      }
1916  };
1917  
1918  const VMStateDescription vmstate_scsi_device = {
1919      .name = "SCSIDevice",
1920      .version_id = 1,
1921      .minimum_version_id = 1,
1922      .fields = (const VMStateField[]) {
1923          VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1924          VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1925          VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1926          VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1927          VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1928          VMSTATE_UINT32(sense_len, SCSIDevice),
1929          {
1930              .name         = "requests",
1931              .version_id   = 0,
1932              .field_exists = NULL,
1933              .size         = 0,   /* ouch */
1934              .info         = &vmstate_info_scsi_requests,
1935              .flags        = VMS_SINGLE,
1936              .offset       = 0,
1937          },
1938          VMSTATE_END_OF_LIST()
1939      },
1940      .subsections = (const VMStateDescription * const []) {
1941          &vmstate_scsi_sense_state,
1942          NULL
1943      }
1944  };
1945  
1946  static Property scsi_props[] = {
1947      DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
1948      DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
1949      DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
1950      DEFINE_PROP_END_OF_LIST(),
1951  };
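
/*
 * Illustrative example (not part of this file): these properties give a SCSI
 * device its address on the bus, e.g.
 *
 *     -device scsi-hd,drive=drive0,channel=0,scsi-id=1,lun=2
 *
 * Leaving scsi-id or lun at the default of -1 lets the bus pick a free
 * address at realize time.
 */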
1952  
1953  static void scsi_device_class_init(ObjectClass *klass, void *data)
1954  {
1955      DeviceClass *k = DEVICE_CLASS(klass);
1956      set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
1957      k->bus_type  = TYPE_SCSI_BUS;
1958      k->realize   = scsi_qdev_realize;
1959      k->unrealize = scsi_qdev_unrealize;
1960      device_class_set_props(k, scsi_props);
1961  }
1962  
1963  static void scsi_dev_instance_init(Object *obj)
1964  {
1965      DeviceState *dev = DEVICE(obj);
1966      SCSIDevice *s = SCSI_DEVICE(dev);
1967  
1968      device_add_bootindex_property(obj, &s->conf.bootindex,
1969                                    "bootindex", NULL,
1970                                    &s->qdev);
1971  }
1972  
1973  static const TypeInfo scsi_device_type_info = {
1974      .name = TYPE_SCSI_DEVICE,
1975      .parent = TYPE_DEVICE,
1976      .instance_size = sizeof(SCSIDevice),
1977      .abstract = true,
1978      .class_size = sizeof(SCSIDeviceClass),
1979      .class_init = scsi_device_class_init,
1980      .instance_init = scsi_dev_instance_init,
1981  };
1982  
1983  static void scsi_bus_class_init(ObjectClass *klass, void *data)
1984  {
1985      BusClass *k = BUS_CLASS(klass);
1986      HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
1987  
1988      k->get_dev_path = scsibus_get_dev_path;
1989      k->get_fw_dev_path = scsibus_get_fw_dev_path;
1990      k->check_address = scsi_bus_check_address;
1991      hc->unplug = qdev_simple_device_unplug_cb;
1992  }
1993  
1994  static const TypeInfo scsi_bus_info = {
1995      .name = TYPE_SCSI_BUS,
1996      .parent = TYPE_BUS,
1997      .instance_size = sizeof(SCSIBus),
1998      .class_init = scsi_bus_class_init,
1999      .interfaces = (InterfaceInfo[]) {
2000          { TYPE_HOTPLUG_HANDLER },
2001          { }
2002      }
2003  };
2004  
2005  static void scsi_register_types(void)
2006  {
2007      type_register_static(&scsi_bus_info);
2008      type_register_static(&scsi_device_type_info);
2009  }
2010  
2011  type_init(scsi_register_types)
2012