xref: /openbmc/qemu/block/iscsi.c (revision a9ded601)
1 /*
2  * QEMU Block driver for iSCSI images
3  *
4  * Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlberg@gmail.com>
5  * Copyright (c) 2012-2016 Peter Lieven <pl@kamp.de>
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 
28 #include <poll.h>
29 #include <math.h>
30 #include <arpa/inet.h>
31 #include "qemu-common.h"
32 #include "qemu/config-file.h"
33 #include "qemu/error-report.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "block/block_int.h"
37 #include "block/scsi.h"
38 #include "qemu/iov.h"
39 #include "qemu/uuid.h"
40 #include "qmp-commands.h"
41 #include "qapi/qmp/qstring.h"
42 #include "crypto/secret.h"
43 
44 #include <iscsi/iscsi.h>
45 #include <iscsi/scsi-lowlevel.h>
46 
47 #ifdef __linux__
48 #include <scsi/sg.h>
49 #endif
50 
51 typedef struct IscsiLun {
52     struct iscsi_context *iscsi;
53     AioContext *aio_context;
54     int lun;
55     enum scsi_inquiry_peripheral_device_type type;
56     int block_size;
57     uint64_t num_blocks;
58     int events;
59     QEMUTimer *nop_timer;
60     QEMUTimer *event_timer;
61     QemuMutex mutex;
62     struct scsi_inquiry_logical_block_provisioning lbp;
63     struct scsi_inquiry_block_limits bl;
64     unsigned char *zeroblock;
65     /* The allocmap tracks which clusters (pages) on the iSCSI target are
66      * allocated and which are not. If the target returns zeros for
67      * unallocated pages (iscsilun->lbprz) we can return zeros directly
68      * instead of reading them over the wire for reads that fall within an
69      * unallocated block. As there are 3 possible states we need 2 bitmaps
70      * (see the summary below): allocmap_valid tracks whether QEMU's
71      * information about a page is valid, and allocmap tracks whether a page
72      * is allocated. If QEMU has no valid information about a page, its
73      * allocmap entry should also be switched to unallocated to force a new
74      * lookup of the allocation status, because lookups are generally
75      * skipped for pages suspected to be allocated. If an iSCSI target is
76      * opened with cache.direct=on, allocmap_valid is not allocated, so all
77      * cached information is treated as invalid and a fresh lookup is made
78      * for every page even if allocmap reports it as unallocated. */
79     unsigned long *allocmap;
80     unsigned long *allocmap_valid;
81     long allocmap_size;
82     int cluster_sectors;
83     bool use_16_for_rw;
84     bool write_protected;
85     bool lbpme;
86     bool lbprz;
87     bool dpofua;
88     bool has_write_same;
89     bool request_timed_out;
90 } IscsiLun;
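/* Summary of the per-cluster allocmap states described above:
 *   allocmap_valid bit clear            -> status unknown; large reads may
 *                                          trigger a GET LBA STATUS lookup
 *   allocmap_valid set, allocmap set    -> (possibly) allocated; reads go to
 *                                          the target
 *   allocmap_valid set, allocmap clear  -> known unallocated; reads are
 *                                          answered with zeroes locally
 *                                          (see iscsi_co_readv) */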
91 
92 typedef struct IscsiTask {
93     int status;
94     int complete;
95     int retries;
96     int do_retry;
97     struct scsi_task *task;
98     Coroutine *co;
99     IscsiLun *iscsilun;
100     QEMUTimer retry_timer;
101     int err_code;
102 } IscsiTask;
103 
104 typedef struct IscsiAIOCB {
105     BlockAIOCB common;
106     QEMUBH *bh;
107     IscsiLun *iscsilun;
108     struct scsi_task *task;
109     uint8_t *buf;
110     int status;
111     int64_t sector_num;
112     int nb_sectors;
113     int ret;
114 #ifdef __linux__
115     sg_io_hdr_t *ioh;
116 #endif
117 } IscsiAIOCB;
118 
119 /* libiscsi uses time_t so it's enough to process events every second */
120 #define EVENT_INTERVAL 1000
121 #define NOP_INTERVAL 5000
122 #define MAX_NOP_FAILURES 3
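/* Mean backoff times in milliseconds, used as the mean for exp_random() when
 * a failed command has to be retried (see iscsi_co_generic_cb()). */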
123 #define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
124 static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048, 8192, 32768};
125 
126 /* this threshold is a trade-off knob to choose between
127  * the potential additional overhead of an extra GET_LBA_STATUS request
128  * vs. unnecessarily reading a lot of zero sectors over the wire.
129  * If a read request is greater than or equal to ISCSI_CHECKALLOC_THRES
130  * sectors, we first check the allocation status of the area covered by
131  * the request if the allocation map indicates that the area might be
132  * unallocated. */
133 #define ISCSI_CHECKALLOC_THRES 64
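/* ISCSI_CHECKALLOC_THRES is given in QEMU sectors; with BDRV_SECTOR_SIZE of
 * 512 bytes, 64 sectors correspond to a 32 KiB request. */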
134 
135 static void
136 iscsi_bh_cb(void *p)
137 {
138     IscsiAIOCB *acb = p;
139 
140     qemu_bh_delete(acb->bh);
141 
142     g_free(acb->buf);
143     acb->buf = NULL;
144 
145     acb->common.cb(acb->common.opaque, acb->status);
146 
147     if (acb->task != NULL) {
148         scsi_free_scsi_task(acb->task);
149         acb->task = NULL;
150     }
151 
152     qemu_aio_unref(acb);
153 }
154 
155 static void
156 iscsi_schedule_bh(IscsiAIOCB *acb)
157 {
158     if (acb->bh) {
159         return;
160     }
161     acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
162     qemu_bh_schedule(acb->bh);
163 }
164 
165 static void iscsi_co_generic_bh_cb(void *opaque)
166 {
167     struct IscsiTask *iTask = opaque;
168 
169     iTask->complete = 1;
170     aio_co_wake(iTask->co);
171 }
172 
173 static void iscsi_retry_timer_expired(void *opaque)
174 {
175     struct IscsiTask *iTask = opaque;
176     iTask->complete = 1;
177     if (iTask->co) {
178         aio_co_wake(iTask->co);
179     }
180 }
181 
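/* Draw a random delay from an exponential distribution with the given mean,
 * used to spread out retries of failed commands (see iscsi_retry_times). */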
182 static inline unsigned exp_random(double mean)
183 {
184     return -mean * log((double)rand() / RAND_MAX);
185 }
186 
187 /* SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST was introduced in
188  * libiscsi 1.10.0, together with other constants we need.  Use it as
189  * a hint that we have to define them ourselves if needed, to keep the
190  * minimum required libiscsi version at 1.9.0.  We use an ASCQ macro for
191  * the test because SCSI_STATUS_* is an enum.
192  *
193  * To guard against future changes where SCSI_SENSE_ASCQ_* also becomes
194  * an enum, check against the LIBISCSI_API_VERSION macro, which was
195  * introduced in 1.11.0.  If it is present, there is no need to define
196  * anything.
197  */
198 #if !defined(SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST) && \
199     !defined(LIBISCSI_API_VERSION)
200 #define SCSI_STATUS_TASK_SET_FULL                          0x28
201 #define SCSI_STATUS_TIMEOUT                                0x0f000002
202 #define SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST    0x2600
203 #define SCSI_SENSE_ASCQ_PARAMETER_LIST_LENGTH_ERROR        0x1a00
204 #endif
205 
206 #ifndef LIBISCSI_API_VERSION
207 #define LIBISCSI_API_VERSION 20130701
208 #endif
209 
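/* Map the SCSI sense data of a failed command to a negative errno value. */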
210 static int iscsi_translate_sense(struct scsi_sense *sense)
211 {
212     int ret;
213 
214     switch (sense->key) {
215     case SCSI_SENSE_NOT_READY:
216         return -EBUSY;
217     case SCSI_SENSE_DATA_PROTECTION:
218         return -EACCES;
219     case SCSI_SENSE_COMMAND_ABORTED:
220         return -ECANCELED;
221     case SCSI_SENSE_ILLEGAL_REQUEST:
222         /* Parse ASCQ */
223         break;
224     default:
225         return -EIO;
226     }
227     switch (sense->ascq) {
228     case SCSI_SENSE_ASCQ_PARAMETER_LIST_LENGTH_ERROR:
229     case SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE:
230     case SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB:
231     case SCSI_SENSE_ASCQ_INVALID_FIELD_IN_PARAMETER_LIST:
232         ret = -EINVAL;
233         break;
234     case SCSI_SENSE_ASCQ_LBA_OUT_OF_RANGE:
235         ret = -ENOSPC;
236         break;
237     case SCSI_SENSE_ASCQ_LOGICAL_UNIT_NOT_SUPPORTED:
238         ret = -ENOTSUP;
239         break;
240     case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT:
241     case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT_TRAY_CLOSED:
242     case SCSI_SENSE_ASCQ_MEDIUM_NOT_PRESENT_TRAY_OPEN:
243         ret = -ENOMEDIUM;
244         break;
245     case SCSI_SENSE_ASCQ_WRITE_PROTECTED:
246         ret = -EACCES;
247         break;
248     default:
249         ret = -EIO;
250         break;
251     }
252     return ret;
253 }
254 
255 /* Called (via iscsi_service) with QemuMutex held.  */
256 static void
257 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
258                         void *command_data, void *opaque)
259 {
260     struct IscsiTask *iTask = opaque;
261     struct scsi_task *task = command_data;
262 
263     iTask->status = status;
264     iTask->do_retry = 0;
265     iTask->task = task;
266 
267     if (status != SCSI_STATUS_GOOD) {
268         if (iTask->retries++ < ISCSI_CMD_RETRIES) {
269             if (status == SCSI_STATUS_CHECK_CONDITION
270                 && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
271                 error_report("iSCSI CheckCondition: %s",
272                              iscsi_get_error(iscsi));
273                 iTask->do_retry = 1;
274                 goto out;
275             }
276             if (status == SCSI_STATUS_BUSY ||
277                 status == SCSI_STATUS_TIMEOUT ||
278                 status == SCSI_STATUS_TASK_SET_FULL) {
279                 unsigned retry_time =
280                     exp_random(iscsi_retry_times[iTask->retries - 1]);
281                 if (status == SCSI_STATUS_TIMEOUT) {
282                     /* make sure the request is rescheduled AFTER the
283                      * reconnect is initiated */
284                     retry_time = EVENT_INTERVAL * 2;
285                     iTask->iscsilun->request_timed_out = true;
286                 }
287                 error_report("iSCSI Busy/TaskSetFull/TimeOut"
288                              " (retry #%u in %u ms): %s",
289                              iTask->retries, retry_time,
290                              iscsi_get_error(iscsi));
291                 aio_timer_init(iTask->iscsilun->aio_context,
292                                &iTask->retry_timer, QEMU_CLOCK_REALTIME,
293                                SCALE_MS, iscsi_retry_timer_expired, iTask);
294                 timer_mod(&iTask->retry_timer,
295                           qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
296                 iTask->do_retry = 1;
297                 return;
298             }
299         }
300         iTask->err_code = iscsi_translate_sense(&task->sense);
301         error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
302     }
303 
304 out:
305     if (iTask->co) {
306         aio_bh_schedule_oneshot(iTask->iscsilun->aio_context,
307                                  iscsi_co_generic_bh_cb, iTask);
308     } else {
309         iTask->complete = 1;
310     }
311 }
312 
313 static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
314 {
315     *iTask = (struct IscsiTask) {
316         .co         = qemu_coroutine_self(),
317         .iscsilun   = iscsilun,
318     };
319 }
320 
321 static void
322 iscsi_abort_task_cb(struct iscsi_context *iscsi, int status, void *command_data,
323                     void *private_data)
324 {
325     IscsiAIOCB *acb = private_data;
326 
327     acb->status = -ECANCELED;
328     iscsi_schedule_bh(acb);
329 }
330 
331 static void
332 iscsi_aio_cancel(BlockAIOCB *blockacb)
333 {
334     IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
335     IscsiLun *iscsilun = acb->iscsilun;
336 
337     if (acb->status != -EINPROGRESS) {
338         return;
339     }
340 
341     /* send a task management call to the target to cancel the task */
342     iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
343                                      iscsi_abort_task_cb, acb);
344 
345 }
346 
347 static const AIOCBInfo iscsi_aiocb_info = {
348     .aiocb_size         = sizeof(IscsiAIOCB),
349     .cancel_async       = iscsi_aio_cancel,
350 };
351 
352 
353 static void iscsi_process_read(void *arg);
354 static void iscsi_process_write(void *arg);
355 
356 /* Called with QemuMutex held.  */
357 static void
358 iscsi_set_events(IscsiLun *iscsilun)
359 {
360     struct iscsi_context *iscsi = iscsilun->iscsi;
361     int ev = iscsi_which_events(iscsi);
362 
363     if (ev != iscsilun->events) {
364         aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
365                            false,
366                            (ev & POLLIN) ? iscsi_process_read : NULL,
367                            (ev & POLLOUT) ? iscsi_process_write : NULL,
368                            NULL,
369                            iscsilun);
370         iscsilun->events = ev;
371     }
372 }
373 
374 static void iscsi_timed_check_events(void *opaque)
375 {
376     IscsiLun *iscsilun = opaque;
377 
378     /* check for timed out requests */
379     iscsi_service(iscsilun->iscsi, 0);
380 
381     if (iscsilun->request_timed_out) {
382         iscsilun->request_timed_out = false;
383         iscsi_reconnect(iscsilun->iscsi);
384     }
385 
386     /* newer versions of libiscsi may return zero events. Ensure we are able
387      * to return to service once this situation changes. */
388     iscsi_set_events(iscsilun);
389 
390     timer_mod(iscsilun->event_timer,
391               qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
392 }
393 
394 static void
395 iscsi_process_read(void *arg)
396 {
397     IscsiLun *iscsilun = arg;
398     struct iscsi_context *iscsi = iscsilun->iscsi;
399 
400     qemu_mutex_lock(&iscsilun->mutex);
401     iscsi_service(iscsi, POLLIN);
402     iscsi_set_events(iscsilun);
403     qemu_mutex_unlock(&iscsilun->mutex);
404 }
405 
406 static void
407 iscsi_process_write(void *arg)
408 {
409     IscsiLun *iscsilun = arg;
410     struct iscsi_context *iscsi = iscsilun->iscsi;
411 
412     qemu_mutex_lock(&iscsilun->mutex);
413     iscsi_service(iscsi, POLLOUT);
414     iscsi_set_events(iscsilun);
415     qemu_mutex_unlock(&iscsilun->mutex);
416 }
417 
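/* Convert between LUN block numbers and QEMU's fixed 512-byte sectors.
 * For example, assuming a hypothetical LUN block size of 4096 bytes,
 * sector_lun2qemu(16, iscsilun) yields 128 and sector_qemu2lun(128, iscsilun)
 * yields 16. */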
418 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
419 {
420     return sector * iscsilun->block_size / BDRV_SECTOR_SIZE;
421 }
422 
423 static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
424 {
425     return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
426 }
427 
428 static bool is_byte_request_lun_aligned(int64_t offset, int count,
429                                         IscsiLun *iscsilun)
430 {
431     if (offset % iscsilun->block_size || count % iscsilun->block_size) {
432         error_report("iSCSI misaligned request: "
433                      "iscsilun->block_size %u, offset %" PRIi64
434                      ", count %d",
435                      iscsilun->block_size, offset, count);
436         return false;
437     }
438     return true;
439 }
440 
441 static bool is_sector_request_lun_aligned(int64_t sector_num, int nb_sectors,
442                                           IscsiLun *iscsilun)
443 {
444     assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);
445     return is_byte_request_lun_aligned(sector_num << BDRV_SECTOR_BITS,
446                                        nb_sectors << BDRV_SECTOR_BITS,
447                                        iscsilun);
448 }
449 
450 static void iscsi_allocmap_free(IscsiLun *iscsilun)
451 {
452     g_free(iscsilun->allocmap);
453     g_free(iscsilun->allocmap_valid);
454     iscsilun->allocmap = NULL;
455     iscsilun->allocmap_valid = NULL;
456 }
457 
458 
459 static int iscsi_allocmap_init(IscsiLun *iscsilun, int open_flags)
460 {
461     iscsi_allocmap_free(iscsilun);
462 
463     iscsilun->allocmap_size =
464         DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks, iscsilun),
465                      iscsilun->cluster_sectors);
466 
467     iscsilun->allocmap = bitmap_try_new(iscsilun->allocmap_size);
468     if (!iscsilun->allocmap) {
469         return -ENOMEM;
470     }
471 
472     if (open_flags & BDRV_O_NOCACHE) {
473         /* in case cache.direct=on, all allocmap entries are
474          * treated as invalid to force a new lookup of the block
475          * status on every read request */
476         return 0;
477     }
478 
479     iscsilun->allocmap_valid = bitmap_try_new(iscsilun->allocmap_size);
480     if (!iscsilun->allocmap_valid) {
481         /* if we are under memory pressure free the allocmap as well */
482         iscsi_allocmap_free(iscsilun);
483         return -ENOMEM;
484     }
485 
486     return 0;
487 }
488 
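/* Update allocmap/allocmap_valid for a sector range. The update is
 * conservative: marking a range allocated or invalid affects every cluster it
 * touches, while marking it unallocated or valid only affects clusters that
 * are completely contained in the range. */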
489 static void
490 iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
491                       int nb_sectors, bool allocated, bool valid)
492 {
493     int64_t cl_num_expanded, nb_cls_expanded, cl_num_shrunk, nb_cls_shrunk;
494 
495     if (iscsilun->allocmap == NULL) {
496         return;
497     }
498     /* expand to entirely contain all affected clusters */
499     cl_num_expanded = sector_num / iscsilun->cluster_sectors;
500     nb_cls_expanded = DIV_ROUND_UP(sector_num + nb_sectors,
501                                    iscsilun->cluster_sectors) - cl_num_expanded;
502     /* shrink to touch only completely contained clusters */
503     cl_num_shrunk = DIV_ROUND_UP(sector_num, iscsilun->cluster_sectors);
504     nb_cls_shrunk = (sector_num + nb_sectors) / iscsilun->cluster_sectors
505                       - cl_num_shrunk;
506     if (allocated) {
507         bitmap_set(iscsilun->allocmap, cl_num_expanded, nb_cls_expanded);
508     } else {
509         if (nb_cls_shrunk > 0) {
510             bitmap_clear(iscsilun->allocmap, cl_num_shrunk, nb_cls_shrunk);
511         }
512     }
513 
514     if (iscsilun->allocmap_valid == NULL) {
515         return;
516     }
517     if (valid) {
518         if (nb_cls_shrunk > 0) {
519             bitmap_set(iscsilun->allocmap_valid, cl_num_shrunk, nb_cls_shrunk);
520         }
521     } else {
522         bitmap_clear(iscsilun->allocmap_valid, cl_num_expanded,
523                      nb_cls_expanded);
524     }
525 }
526 
527 static void
528 iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t sector_num,
529                              int nb_sectors)
530 {
531     iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, true, true);
532 }
533 
534 static void
535 iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t sector_num,
536                                int nb_sectors)
537 {
538     /* Note: if cache.direct=on the fifth argument to iscsi_allocmap_update
539      * is ignored, so this will in effect be an iscsi_allocmap_set_invalid.
540      */
541     iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, true);
542 }
543 
544 static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t sector_num,
545                                        int nb_sectors)
546 {
547     iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, false);
548 }
549 
550 static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
551 {
552     if (iscsilun->allocmap) {
553         bitmap_zero(iscsilun->allocmap, iscsilun->allocmap_size);
554     }
555     if (iscsilun->allocmap_valid) {
556         bitmap_zero(iscsilun->allocmap_valid, iscsilun->allocmap_size);
557     }
558 }
559 
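/* iscsi_allocmap_is_allocated() returns true if any cluster overlapping the
 * range is marked allocated (or if no allocmap exists at all);
 * iscsi_allocmap_is_valid() returns true only if QEMU holds valid information
 * for every cluster in the range. */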
560 static inline bool
561 iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t sector_num,
562                             int nb_sectors)
563 {
564     unsigned long size;
565     if (iscsilun->allocmap == NULL) {
566         return true;
567     }
568     size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
569     return !(find_next_bit(iscsilun->allocmap, size,
570                            sector_num / iscsilun->cluster_sectors) == size);
571 }
572 
573 static inline bool iscsi_allocmap_is_valid(IscsiLun *iscsilun,
574                                            int64_t sector_num, int nb_sectors)
575 {
576     unsigned long size;
577     if (iscsilun->allocmap_valid == NULL) {
578         return false;
579     }
580     size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
581     return (find_next_zero_bit(iscsilun->allocmap_valid, size,
582                                sector_num / iscsilun->cluster_sectors) == size);
583 }
584 
585 static int coroutine_fn
586 iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
587                       QEMUIOVector *iov, int flags)
588 {
589     IscsiLun *iscsilun = bs->opaque;
590     struct IscsiTask iTask;
591     uint64_t lba;
592     uint32_t num_sectors;
593     bool fua = flags & BDRV_REQ_FUA;
594     int r = 0;
595 
596     if (fua) {
597         assert(iscsilun->dpofua);
598     }
599     if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
600         return -EINVAL;
601     }
602 
603     if (bs->bl.max_transfer) {
604         assert(nb_sectors << BDRV_SECTOR_BITS <= bs->bl.max_transfer);
605     }
606 
607     lba = sector_qemu2lun(sector_num, iscsilun);
608     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
609     iscsi_co_init_iscsitask(iscsilun, &iTask);
610     qemu_mutex_lock(&iscsilun->mutex);
611 retry:
612     if (iscsilun->use_16_for_rw) {
613 #if LIBISCSI_API_VERSION >= (20160603)
614         iTask.task = iscsi_write16_iov_task(iscsilun->iscsi, iscsilun->lun, lba,
615                                             NULL, num_sectors * iscsilun->block_size,
616                                             iscsilun->block_size, 0, 0, fua, 0, 0,
617                                             iscsi_co_generic_cb, &iTask,
618                                             (struct scsi_iovec *)iov->iov, iov->niov);
619     } else {
620         iTask.task = iscsi_write10_iov_task(iscsilun->iscsi, iscsilun->lun, lba,
621                                             NULL, num_sectors * iscsilun->block_size,
622                                             iscsilun->block_size, 0, 0, fua, 0, 0,
623                                             iscsi_co_generic_cb, &iTask,
624                                             (struct scsi_iovec *)iov->iov, iov->niov);
625     }
626 #else
627         iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
628                                         NULL, num_sectors * iscsilun->block_size,
629                                         iscsilun->block_size, 0, 0, fua, 0, 0,
630                                         iscsi_co_generic_cb, &iTask);
631     } else {
632         iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba,
633                                         NULL, num_sectors * iscsilun->block_size,
634                                         iscsilun->block_size, 0, 0, fua, 0, 0,
635                                         iscsi_co_generic_cb, &iTask);
636     }
637 #endif
638     if (iTask.task == NULL) {
639         qemu_mutex_unlock(&iscsilun->mutex);
640         return -ENOMEM;
641     }
642 #if LIBISCSI_API_VERSION < (20160603)
643     scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov,
644                           iov->niov);
645 #endif
646     while (!iTask.complete) {
647         iscsi_set_events(iscsilun);
648         qemu_mutex_unlock(&iscsilun->mutex);
649         qemu_coroutine_yield();
650         qemu_mutex_lock(&iscsilun->mutex);
651     }
652 
653     if (iTask.task != NULL) {
654         scsi_free_scsi_task(iTask.task);
655         iTask.task = NULL;
656     }
657 
658     if (iTask.do_retry) {
659         iTask.complete = 0;
660         goto retry;
661     }
662 
663     if (iTask.status != SCSI_STATUS_GOOD) {
664         iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
665         r = iTask.err_code;
666         goto out_unlock;
667     }
668 
669     iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
670 
671 out_unlock:
672     qemu_mutex_unlock(&iscsilun->mutex);
673     return r;
674 }
675 
676 
677 
678 static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
679                                                   int64_t sector_num,
680                                                   int nb_sectors, int *pnum,
681                                                   BlockDriverState **file)
682 {
683     IscsiLun *iscsilun = bs->opaque;
684     struct scsi_get_lba_status *lbas = NULL;
685     struct scsi_lba_status_descriptor *lbasd = NULL;
686     struct IscsiTask iTask;
687     int64_t ret;
688 
689     iscsi_co_init_iscsitask(iscsilun, &iTask);
690 
691     if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
692         ret = -EINVAL;
693         goto out;
694     }
695 
696     /* default to all sectors allocated */
697     ret = BDRV_BLOCK_DATA;
698     ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID;
699     *pnum = nb_sectors;
700 
701     /* LUN does not support logical block provisioning */
702     if (!iscsilun->lbpme) {
703         goto out;
704     }
705 
706     qemu_mutex_lock(&iscsilun->mutex);
707 retry:
708     if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
709                                   sector_qemu2lun(sector_num, iscsilun),
710                                   8 + 16, iscsi_co_generic_cb,
711                                   &iTask) == NULL) {
712         ret = -ENOMEM;
713         goto out_unlock;
714     }
715 
716     while (!iTask.complete) {
717         iscsi_set_events(iscsilun);
718         qemu_mutex_unlock(&iscsilun->mutex);
719         qemu_coroutine_yield();
720         qemu_mutex_lock(&iscsilun->mutex);
721     }
722 
723     if (iTask.do_retry) {
724         if (iTask.task != NULL) {
725             scsi_free_scsi_task(iTask.task);
726             iTask.task = NULL;
727         }
728         iTask.complete = 0;
729         goto retry;
730     }
731 
732     if (iTask.status != SCSI_STATUS_GOOD) {
733         /* in case the GET LBA STATUS request fails (e.g.
734          * because the device is busy or the command is not
735          * supported) we pretend all blocks are allocated
736          * for backwards compatibility */
737         goto out_unlock;
738     }
739 
740     lbas = scsi_datain_unmarshall(iTask.task);
741     if (lbas == NULL) {
742         ret = -EIO;
743         goto out_unlock;
744     }
745 
746     lbasd = &lbas->descriptors[0];
747 
748     if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
749         ret = -EIO;
750         goto out_unlock;
751     }
752 
753     *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
754 
755     if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
756         lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
757         ret &= ~BDRV_BLOCK_DATA;
758         if (iscsilun->lbprz) {
759             ret |= BDRV_BLOCK_ZERO;
760         }
761     }
762 
763     if (ret & BDRV_BLOCK_ZERO) {
764         iscsi_allocmap_set_unallocated(iscsilun, sector_num, *pnum);
765     } else {
766         iscsi_allocmap_set_allocated(iscsilun, sector_num, *pnum);
767     }
768 
769     if (*pnum > nb_sectors) {
770         *pnum = nb_sectors;
771     }
772 out_unlock:
773     qemu_mutex_unlock(&iscsilun->mutex);
774 out:
775     if (iTask.task != NULL) {
776         scsi_free_scsi_task(iTask.task);
777     }
778     if (ret > 0 && ret & BDRV_BLOCK_OFFSET_VALID) {
779         *file = bs;
780     }
781     return ret;
782 }
783 
784 static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
785                                        int64_t sector_num, int nb_sectors,
786                                        QEMUIOVector *iov)
787 {
788     IscsiLun *iscsilun = bs->opaque;
789     struct IscsiTask iTask;
790     uint64_t lba;
791     uint32_t num_sectors;
792 
793     if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
794         return -EINVAL;
795     }
796 
797     if (bs->bl.max_transfer) {
798         assert(nb_sectors << BDRV_SECTOR_BITS <= bs->bl.max_transfer);
799     }
800 
801     /* if cache.direct is off and we have a valid entry in our allocation map
802      * we can skip checking the block status and directly return zeroes if
803      * the request falls within an unallocated area */
804     if (iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
805         !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
806             qemu_iovec_memset(iov, 0, 0x00, iov->size);
807             return 0;
808     }
809 
810     if (nb_sectors >= ISCSI_CHECKALLOC_THRES &&
811         !iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
812         !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
813         int pnum;
814         BlockDriverState *file;
815         /* check the block status from the beginning of the cluster
816          * containing the start sector */
817         int64_t ret = iscsi_co_get_block_status(bs,
818                           sector_num - sector_num % iscsilun->cluster_sectors,
819                           BDRV_REQUEST_MAX_SECTORS, &pnum, &file);
820         if (ret < 0) {
821             return ret;
822         }
823         /* if the whole request falls into an unallocated area we can
824          * avoid reading and directly return zeroes instead */
825         if (ret & BDRV_BLOCK_ZERO &&
826             pnum >= nb_sectors + sector_num % iscsilun->cluster_sectors) {
827             qemu_iovec_memset(iov, 0, 0x00, iov->size);
828             return 0;
829         }
830     }
831 
832     lba = sector_qemu2lun(sector_num, iscsilun);
833     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
834 
835     iscsi_co_init_iscsitask(iscsilun, &iTask);
836     qemu_mutex_lock(&iscsilun->mutex);
837 retry:
838     if (iscsilun->use_16_for_rw) {
839 #if LIBISCSI_API_VERSION >= (20160603)
840         iTask.task = iscsi_read16_iov_task(iscsilun->iscsi, iscsilun->lun, lba,
841                                            num_sectors * iscsilun->block_size,
842                                            iscsilun->block_size, 0, 0, 0, 0, 0,
843                                            iscsi_co_generic_cb, &iTask,
844                                            (struct scsi_iovec *)iov->iov, iov->niov);
845     } else {
846         iTask.task = iscsi_read10_iov_task(iscsilun->iscsi, iscsilun->lun, lba,
847                                            num_sectors * iscsilun->block_size,
848                                            iscsilun->block_size,
849                                            0, 0, 0, 0, 0,
850                                            iscsi_co_generic_cb, &iTask,
851                                            (struct scsi_iovec *)iov->iov, iov->niov);
852     }
853 #else
854         iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
855                                        num_sectors * iscsilun->block_size,
856                                        iscsilun->block_size, 0, 0, 0, 0, 0,
857                                        iscsi_co_generic_cb, &iTask);
858     } else {
859         iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
860                                        num_sectors * iscsilun->block_size,
861                                        iscsilun->block_size,
862                                        0, 0, 0, 0, 0,
863                                        iscsi_co_generic_cb, &iTask);
864     }
865 #endif
866     if (iTask.task == NULL) {
867         qemu_mutex_unlock(&iscsilun->mutex);
868         return -ENOMEM;
869     }
870 #if LIBISCSI_API_VERSION < (20160603)
871     scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);
872 #endif
873     while (!iTask.complete) {
874         iscsi_set_events(iscsilun);
875         qemu_mutex_unlock(&iscsilun->mutex);
876         qemu_coroutine_yield();
877         qemu_mutex_lock(&iscsilun->mutex);
878     }
879 
880     if (iTask.task != NULL) {
881         scsi_free_scsi_task(iTask.task);
882         iTask.task = NULL;
883     }
884 
885     if (iTask.do_retry) {
886         iTask.complete = 0;
887         goto retry;
888     }
889     qemu_mutex_unlock(&iscsilun->mutex);
890 
891     if (iTask.status != SCSI_STATUS_GOOD) {
892         return iTask.err_code;
893     }
894 
895     return 0;
896 }
897 
898 static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
899 {
900     IscsiLun *iscsilun = bs->opaque;
901     struct IscsiTask iTask;
902 
903     iscsi_co_init_iscsitask(iscsilun, &iTask);
904     qemu_mutex_lock(&iscsilun->mutex);
905 retry:
906     if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
907                                       0, iscsi_co_generic_cb, &iTask) == NULL) {
908         qemu_mutex_unlock(&iscsilun->mutex);
909         return -ENOMEM;
910     }
911 
912     while (!iTask.complete) {
913         iscsi_set_events(iscsilun);
914         qemu_mutex_unlock(&iscsilun->mutex);
915         qemu_coroutine_yield();
916         qemu_mutex_lock(&iscsilun->mutex);
917     }
918 
919     if (iTask.task != NULL) {
920         scsi_free_scsi_task(iTask.task);
921         iTask.task = NULL;
922     }
923 
924     if (iTask.do_retry) {
925         iTask.complete = 0;
926         goto retry;
927     }
928     qemu_mutex_unlock(&iscsilun->mutex);
929 
930     if (iTask.status != SCSI_STATUS_GOOD) {
931         return iTask.err_code;
932     }
933 
934     return 0;
935 }
936 
937 #ifdef __linux__
938 /* Called (via iscsi_service) with QemuMutex held.  */
939 static void
940 iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
941                      void *command_data, void *opaque)
942 {
943     IscsiAIOCB *acb = opaque;
944 
945     g_free(acb->buf);
946     acb->buf = NULL;
947 
948     acb->status = 0;
949     if (status < 0) {
950         error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s",
951                      iscsi_get_error(iscsi));
952         acb->status = iscsi_translate_sense(&acb->task->sense);
953     }
954 
955     acb->ioh->driver_status = 0;
956     acb->ioh->host_status   = 0;
957     acb->ioh->resid         = 0;
958     acb->ioh->status        = status;
959 
960 #define SG_ERR_DRIVER_SENSE    0x08
961 
962     if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->datain.size >= 2) {
963         int ss;
964 
965         acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE;
966 
967         acb->ioh->sb_len_wr = acb->task->datain.size - 2;
968         ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ?
969              acb->ioh->mx_sb_len : acb->ioh->sb_len_wr;
970         memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);
971     }
972 
973     iscsi_schedule_bh(acb);
974 }
975 
976 static void iscsi_ioctl_bh_completion(void *opaque)
977 {
978     IscsiAIOCB *acb = opaque;
979 
980     qemu_bh_delete(acb->bh);
981     acb->common.cb(acb->common.opaque, acb->ret);
982     qemu_aio_unref(acb);
983 }
984 
985 static void iscsi_ioctl_handle_emulated(IscsiAIOCB *acb, int req, void *buf)
986 {
987     BlockDriverState *bs = acb->common.bs;
988     IscsiLun *iscsilun = bs->opaque;
989     int ret = 0;
990 
991     switch (req) {
992     case SG_GET_VERSION_NUM:
993         *(int *)buf = 30000;
994         break;
995     case SG_GET_SCSI_ID:
996         ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type;
997         break;
998     default:
999         ret = -EINVAL;
1000     }
1001     assert(!acb->bh);
1002     acb->bh = aio_bh_new(bdrv_get_aio_context(bs),
1003                          iscsi_ioctl_bh_completion, acb);
1004     acb->ret = ret;
1005     qemu_bh_schedule(acb->bh);
1006 }
1007 
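/* Handle the SG_IO ioctl by passing the CDB through to the target with
 * iscsi_scsi_command_async(); requests other than SG_IO are answered locally
 * by iscsi_ioctl_handle_emulated(). */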
1008 static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
1009         unsigned long int req, void *buf,
1010         BlockCompletionFunc *cb, void *opaque)
1011 {
1012     IscsiLun *iscsilun = bs->opaque;
1013     struct iscsi_context *iscsi = iscsilun->iscsi;
1014     struct iscsi_data data;
1015     IscsiAIOCB *acb;
1016 
1017     acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
1018 
1019     acb->iscsilun = iscsilun;
1020     acb->bh          = NULL;
1021     acb->status      = -EINPROGRESS;
1022     acb->buf         = NULL;
1023     acb->ioh         = buf;
1024 
1025     if (req != SG_IO) {
1026         iscsi_ioctl_handle_emulated(acb, req, buf);
1027         return &acb->common;
1028     }
1029 
1030     if (acb->ioh->cmd_len > SCSI_CDB_MAX_SIZE) {
1031         error_report("iSCSI: ioctl error CDB exceeds max size (%d > %d)",
1032                      acb->ioh->cmd_len, SCSI_CDB_MAX_SIZE);
1033         qemu_aio_unref(acb);
1034         return NULL;
1035     }
1036 
1037     acb->task = malloc(sizeof(struct scsi_task));
1038     if (acb->task == NULL) {
1039         error_report("iSCSI: Failed to allocate task for scsi command. %s",
1040                      iscsi_get_error(iscsi));
1041         qemu_aio_unref(acb);
1042         return NULL;
1043     }
1044     memset(acb->task, 0, sizeof(struct scsi_task));
1045 
1046     switch (acb->ioh->dxfer_direction) {
1047     case SG_DXFER_TO_DEV:
1048         acb->task->xfer_dir = SCSI_XFER_WRITE;
1049         break;
1050     case SG_DXFER_FROM_DEV:
1051         acb->task->xfer_dir = SCSI_XFER_READ;
1052         break;
1053     default:
1054         acb->task->xfer_dir = SCSI_XFER_NONE;
1055         break;
1056     }
1057 
1058     acb->task->cdb_size = acb->ioh->cmd_len;
1059     memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len);
1060     acb->task->expxferlen = acb->ioh->dxfer_len;
1061 
1062     data.size = 0;
1063     qemu_mutex_lock(&iscsilun->mutex);
1064     if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
1065         if (acb->ioh->iovec_count == 0) {
1066             data.data = acb->ioh->dxferp;
1067             data.size = acb->ioh->dxfer_len;
1068         } else {
1069             scsi_task_set_iov_out(acb->task,
1070                                  (struct scsi_iovec *) acb->ioh->dxferp,
1071                                  acb->ioh->iovec_count);
1072         }
1073     }
1074 
1075     if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
1076                                  iscsi_aio_ioctl_cb,
1077                                  (data.size > 0) ? &data : NULL,
1078                                  acb) != 0) {
1079         qemu_mutex_unlock(&iscsilun->mutex);
1080         scsi_free_scsi_task(acb->task);
1081         qemu_aio_unref(acb);
1082         return NULL;
1083     }
1084 
1085     /* tell libiscsi to read straight into the buffer we got from ioctl */
1086     if (acb->task->xfer_dir == SCSI_XFER_READ) {
1087         if (acb->ioh->iovec_count == 0) {
1088             scsi_task_add_data_in_buffer(acb->task,
1089                                          acb->ioh->dxfer_len,
1090                                          acb->ioh->dxferp);
1091         } else {
1092             scsi_task_set_iov_in(acb->task,
1093                                  (struct scsi_iovec *) acb->ioh->dxferp,
1094                                  acb->ioh->iovec_count);
1095         }
1096     }
1097 
1098     iscsi_set_events(iscsilun);
1099     qemu_mutex_unlock(&iscsilun->mutex);
1100 
1101     return &acb->common;
1102 }
1103 
1104 #endif
1105 
1106 static int64_t
1107 iscsi_getlength(BlockDriverState *bs)
1108 {
1109     IscsiLun *iscsilun = bs->opaque;
1110     int64_t len;
1111 
1112     len  = iscsilun->num_blocks;
1113     len *= iscsilun->block_size;
1114 
1115     return len;
1116 }
1117 
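/* Discard a byte range by sending an UNMAP to the target. The request is
 * silently ignored if the target does not support UNMAP (lbpu == 0) or
 * rejects it with a check condition (e.g. because of alignment constraints);
 * on success the affected allocmap entries are invalidated. */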
1118 static int
1119 coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
1120 {
1121     IscsiLun *iscsilun = bs->opaque;
1122     struct IscsiTask iTask;
1123     struct unmap_list list;
1124     int r = 0;
1125 
1126     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
1127         return -ENOTSUP;
1128     }
1129 
1130     if (!iscsilun->lbp.lbpu) {
1131         /* UNMAP is not supported by the target */
1132         return 0;
1133     }
1134 
1135     list.lba = offset / iscsilun->block_size;
1136     list.num = count / iscsilun->block_size;
1137 
1138     iscsi_co_init_iscsitask(iscsilun, &iTask);
1139     qemu_mutex_lock(&iscsilun->mutex);
1140 retry:
1141     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
1142                          iscsi_co_generic_cb, &iTask) == NULL) {
1143         r = -ENOMEM;
1144         goto out_unlock;
1145     }
1146 
1147     while (!iTask.complete) {
1148         iscsi_set_events(iscsilun);
1149         qemu_mutex_unlock(&iscsilun->mutex);
1150         qemu_coroutine_yield();
1151         qemu_mutex_lock(&iscsilun->mutex);
1152     }
1153 
1154     if (iTask.task != NULL) {
1155         scsi_free_scsi_task(iTask.task);
1156         iTask.task = NULL;
1157     }
1158 
1159     if (iTask.do_retry) {
1160         iTask.complete = 0;
1161         goto retry;
1162     }
1163 
1164     if (iTask.status == SCSI_STATUS_CHECK_CONDITION) {
1165         /* the target might fail with a check condition if it
1166            is not happy with the alignment of the UNMAP request;
1167            we silently ignore the error in this case */
1168         goto out_unlock;
1169     }
1170 
1171     if (iTask.status != SCSI_STATUS_GOOD) {
1172         r = iTask.err_code;
1173         goto out_unlock;
1174     }
1175 
1176     iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
1177                                count >> BDRV_SECTOR_BITS);
1178 
1179 out_unlock:
1180     qemu_mutex_unlock(&iscsilun->mutex);
1181     return r;
1182 }
1183 
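/* Write zeroes using WRITE SAME(10) or WRITE SAME(16). With
 * BDRV_REQ_MAY_UNMAP the UNMAP bit is set where the target advertises support
 * (lbpws10/lbpws); otherwise a zeroed block is written to the whole range
 * with WRITE SAME. If the target turns out not to support WRITE SAME at all,
 * -ENOTSUP is returned so the caller can fall back to regular writes. */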
1184 static int
1185 coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
1186                                     int count, BdrvRequestFlags flags)
1187 {
1188     IscsiLun *iscsilun = bs->opaque;
1189     struct IscsiTask iTask;
1190     uint64_t lba;
1191     uint32_t nb_blocks;
1192     bool use_16_for_ws = iscsilun->use_16_for_rw;
1193     int r = 0;
1194 
1195     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
1196         return -ENOTSUP;
1197     }
1198 
1199     if (flags & BDRV_REQ_MAY_UNMAP) {
1200         if (!use_16_for_ws && !iscsilun->lbp.lbpws10) {
1201             /* WRITESAME10 with UNMAP is unsupported; try WRITESAME16 */
1202             use_16_for_ws = true;
1203         }
1204         if (use_16_for_ws && !iscsilun->lbp.lbpws) {
1205             /* WRITESAME16 with UNMAP is not supported by the target,
1206              * fall back and try WRITESAME10/16 without UNMAP */
1207             flags &= ~BDRV_REQ_MAY_UNMAP;
1208             use_16_for_ws = iscsilun->use_16_for_rw;
1209         }
1210     }
1211 
1212     if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) {
1213         /* WRITESAME without UNMAP is not supported by the target */
1214         return -ENOTSUP;
1215     }
1216 
1217     lba = offset / iscsilun->block_size;
1218     nb_blocks = count / iscsilun->block_size;
1219 
1220     if (iscsilun->zeroblock == NULL) {
1221         iscsilun->zeroblock = g_try_malloc0(iscsilun->block_size);
1222         if (iscsilun->zeroblock == NULL) {
1223             return -ENOMEM;
1224         }
1225     }
1226 
1227     qemu_mutex_lock(&iscsilun->mutex);
1228     iscsi_co_init_iscsitask(iscsilun, &iTask);
1229 retry:
1230     if (use_16_for_ws) {
1231         iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
1232                                             iscsilun->zeroblock, iscsilun->block_size,
1233                                             nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
1234                                             0, 0, iscsi_co_generic_cb, &iTask);
1235     } else {
1236         iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
1237                                             iscsilun->zeroblock, iscsilun->block_size,
1238                                             nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
1239                                             0, 0, iscsi_co_generic_cb, &iTask);
1240     }
1241     if (iTask.task == NULL) {
1242         qemu_mutex_unlock(&iscsilun->mutex);
1243         return -ENOMEM;
1244     }
1245 
1246     while (!iTask.complete) {
1247         iscsi_set_events(iscsilun);
1248         qemu_mutex_unlock(&iscsilun->mutex);
1249         qemu_coroutine_yield();
1250         qemu_mutex_lock(&iscsilun->mutex);
1251     }
1252 
1253     if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
1254         iTask.task->sense.key == SCSI_SENSE_ILLEGAL_REQUEST &&
1255         (iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE ||
1256          iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB)) {
1257         /* WRITE SAME is not supported by the target */
1258         iscsilun->has_write_same = false;
1259         scsi_free_scsi_task(iTask.task);
1260         r = -ENOTSUP;
1261         goto out_unlock;
1262     }
1263 
1264     if (iTask.task != NULL) {
1265         scsi_free_scsi_task(iTask.task);
1266         iTask.task = NULL;
1267     }
1268 
1269     if (iTask.do_retry) {
1270         iTask.complete = 0;
1271         goto retry;
1272     }
1273 
1274     if (iTask.status != SCSI_STATUS_GOOD) {
1275         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
1276                                    count >> BDRV_SECTOR_BITS);
1277         r = iTask.err_code;
1278         goto out_unlock;
1279     }
1280 
1281     if (flags & BDRV_REQ_MAY_UNMAP) {
1282         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
1283                                    count >> BDRV_SECTOR_BITS);
1284     } else {
1285         iscsi_allocmap_set_allocated(iscsilun, offset >> BDRV_SECTOR_BITS,
1286                                      count >> BDRV_SECTOR_BITS);
1287     }
1288 
1289 out_unlock:
1290     qemu_mutex_unlock(&iscsilun->mutex);
1291     return r;
1292 }
1293 
1294 static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts,
1295                        Error **errp)
1296 {
1297     const char *user = NULL;
1298     const char *password = NULL;
1299     const char *secretid;
1300     char *secret = NULL;
1301 
1302     user = qemu_opt_get(opts, "user");
1303     if (!user) {
1304         return;
1305     }
1306 
1307     secretid = qemu_opt_get(opts, "password-secret");
1308     password = qemu_opt_get(opts, "password");
1309     if (secretid && password) {
1310         error_setg(errp, "'password' and 'password-secret' properties are "
1311                    "mutually exclusive");
1312         return;
1313     }
1314     if (secretid) {
1315         secret = qcrypto_secret_lookup_as_utf8(secretid, errp);
1316         if (!secret) {
1317             return;
1318         }
1319         password = secret;
1320     } else if (!password) {
1321         error_setg(errp, "CHAP username specified but no password was given");
1322         return;
1323     }
1324 
1325     if (iscsi_set_initiator_username_pwd(iscsi, user, password)) {
1326         error_setg(errp, "Failed to set initiator username and password");
1327     }
1328 
1329     g_free(secret);
1330 }
1331 
1332 static void apply_header_digest(struct iscsi_context *iscsi, QemuOpts *opts,
1333                                 Error **errp)
1334 {
1335     const char *digest = NULL;
1336 
1337     digest = qemu_opt_get(opts, "header-digest");
1338     if (!digest) {
1339         iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
1340     } else if (!strcmp(digest, "crc32c")) {
1341         iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C);
1342     } else if (!strcmp(digest, "none")) {
1343         iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE);
1344     } else if (!strcmp(digest, "crc32c-none")) {
1345         iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_CRC32C_NONE);
1346     } else if (!strcmp(digest, "none-crc32c")) {
1347         iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C);
1348     } else {
1349         error_setg(errp, "Invalid header-digest setting : %s", digest);
1350     }
1351 }
1352 
1353 static char *get_initiator_name(QemuOpts *opts)
1354 {
1355     const char *name;
1356     char *iscsi_name;
1357     UuidInfo *uuid_info;
1358 
1359     name = qemu_opt_get(opts, "initiator-name");
1360     if (name) {
1361         return g_strdup(name);
1362     }
1363 
1364     uuid_info = qmp_query_uuid(NULL);
1365     if (strcmp(uuid_info->UUID, UUID_NONE) == 0) {
1366         name = qemu_get_vm_name();
1367     } else {
1368         name = uuid_info->UUID;
1369     }
1370     iscsi_name = g_strdup_printf("iqn.2008-11.org.linux-kvm%s%s",
1371                                  name ? ":" : "", name ? name : "");
1372     qapi_free_UuidInfo(uuid_info);
1373     return iscsi_name;
1374 }
1375 
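/* Runs every NOP_INTERVAL ms and sends a NOP-Out as a keepalive. If
 * MAX_NOP_FAILURES NOP-Outs are still unanswered the session is considered
 * dead and a reconnect is triggered from iscsi_timed_check_events(); if
 * sending the NOP fails, the timer is not re-armed and NOP keepalives are
 * disabled. */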
1376 static void iscsi_nop_timed_event(void *opaque)
1377 {
1378     IscsiLun *iscsilun = opaque;
1379 
1380     qemu_mutex_lock(&iscsilun->mutex);
1381     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
1382         error_report("iSCSI: NOP timeout. Reconnecting...");
1383         iscsilun->request_timed_out = true;
1384     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
1385         error_report("iSCSI: failed to send NOP-Out. Disabling NOP messages.");
1386         goto out;
1387     }
1388 
1389     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
1390     iscsi_set_events(iscsilun);
1391 
1392 out:
1393     qemu_mutex_unlock(&iscsilun->mutex);
1394 }
1395 
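/* Read the capacity and provisioning information of the LUN synchronously:
 * READ CAPACITY(16) for disks (falling back to READ CAPACITY(10) if needed)
 * and READ CAPACITY(10) for CD/DVD devices, retrying on UNIT ATTENTION. */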
1396 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
1397 {
1398     struct scsi_task *task = NULL;
1399     struct scsi_readcapacity10 *rc10 = NULL;
1400     struct scsi_readcapacity16 *rc16 = NULL;
1401     int retries = ISCSI_CMD_RETRIES;
1402 
1403     do {
1404         if (task != NULL) {
1405             scsi_free_scsi_task(task);
1406             task = NULL;
1407         }
1408 
1409         switch (iscsilun->type) {
1410         case TYPE_DISK:
1411             task = iscsi_readcapacity16_sync(iscsilun->iscsi, iscsilun->lun);
1412             if (task != NULL && task->status == SCSI_STATUS_GOOD) {
1413                 rc16 = scsi_datain_unmarshall(task);
1414                 if (rc16 == NULL) {
1415                     error_setg(errp, "iSCSI: Failed to unmarshall readcapacity16 data.");
1416                 } else {
1417                     iscsilun->block_size = rc16->block_length;
1418                     iscsilun->num_blocks = rc16->returned_lba + 1;
1419                     iscsilun->lbpme = !!rc16->lbpme;
1420                     iscsilun->lbprz = !!rc16->lbprz;
1421                     iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
1422                 }
1423                 break;
1424             }
1425             if (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
1426                 && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
1427                 break;
1428             }
1429             /* Fall through and try READ CAPACITY(10) instead.  */
1430         case TYPE_ROM:
1431             task = iscsi_readcapacity10_sync(iscsilun->iscsi, iscsilun->lun, 0, 0);
1432             if (task != NULL && task->status == SCSI_STATUS_GOOD) {
1433                 rc10 = scsi_datain_unmarshall(task);
1434                 if (rc10 == NULL) {
1435                     error_setg(errp, "iSCSI: Failed to unmarshall readcapacity10 data.");
1436                 } else {
1437                     iscsilun->block_size = rc10->block_size;
1438                     if (rc10->lba == 0) {
1439                         /* blank disk loaded */
1440                         iscsilun->num_blocks = 0;
1441                     } else {
1442                         iscsilun->num_blocks = rc10->lba + 1;
1443                     }
1444                 }
1445             }
1446             break;
1447         default:
1448             return;
1449         }
1450     } while (task != NULL && task->status == SCSI_STATUS_CHECK_CONDITION
1451              && task->sense.key == SCSI_SENSE_UNIT_ATTENTION
1452              && retries-- > 0);
1453 
1454     if (task == NULL || task->status != SCSI_STATUS_GOOD) {
1455         error_setg(errp, "iSCSI: failed to send readcapacity10/16 command");
1456     } else if (!iscsilun->block_size ||
1457                iscsilun->block_size % BDRV_SECTOR_SIZE) {
1458         error_setg(errp, "iSCSI: the target returned an invalid "
1459                    "block size of %d.", iscsilun->block_size);
1460     }
1461     if (task) {
1462         scsi_free_scsi_task(task);
1463     }
1464 }
1465 
1466 static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun,
1467                                           int evpd, int pc, void **inq, Error **errp)
1468 {
1469     int full_size;
1470     struct scsi_task *task = NULL;
1471     task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, 64);
1472     if (task == NULL || task->status != SCSI_STATUS_GOOD) {
1473         goto fail;
1474     }
1475     full_size = scsi_datain_getfullsize(task);
1476     if (full_size > task->datain.size) {
1477         scsi_free_scsi_task(task);
1478 
1479         /* we need more data for the full list */
1480         task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, full_size);
1481         if (task == NULL || task->status != SCSI_STATUS_GOOD) {
1482             goto fail;
1483         }
1484     }
1485 
1486     *inq = scsi_datain_unmarshall(task);
1487     if (*inq == NULL) {
1488         error_setg(errp, "iSCSI: failed to unmarshall inquiry datain blob");
1489         goto fail_with_err;
1490     }
1491 
1492     return task;
1493 
1494 fail:
1495     error_setg(errp, "iSCSI: Inquiry command failed : %s",
1496                iscsi_get_error(iscsi));
1497 fail_with_err:
1498     if (task != NULL) {
1499         scsi_free_scsi_task(task);
1500     }
1501     return NULL;
1502 }
1503 
1504 static void iscsi_detach_aio_context(BlockDriverState *bs)
1505 {
1506     IscsiLun *iscsilun = bs->opaque;
1507 
1508     aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
1509                        false, NULL, NULL, NULL, NULL);
1510     iscsilun->events = 0;
1511 
1512     if (iscsilun->nop_timer) {
1513         timer_del(iscsilun->nop_timer);
1514         timer_free(iscsilun->nop_timer);
1515         iscsilun->nop_timer = NULL;
1516     }
1517     if (iscsilun->event_timer) {
1518         timer_del(iscsilun->event_timer);
1519         timer_free(iscsilun->event_timer);
1520         iscsilun->event_timer = NULL;
1521     }
1522 }
1523 
1524 static void iscsi_attach_aio_context(BlockDriverState *bs,
1525                                      AioContext *new_context)
1526 {
1527     IscsiLun *iscsilun = bs->opaque;
1528 
1529     iscsilun->aio_context = new_context;
1530     iscsi_set_events(iscsilun);
1531 
1532     /* Set up a timer for sending out iSCSI NOPs */
1533     iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,
1534                                         QEMU_CLOCK_REALTIME, SCALE_MS,
1535                                         iscsi_nop_timed_event, iscsilun);
1536     timer_mod(iscsilun->nop_timer,
1537               qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
1538 
1539     /* Set up a timer for periodic calls to iscsi_set_events and to
1540      * scan for command timeout */
1541     iscsilun->event_timer = aio_timer_new(iscsilun->aio_context,
1542                                           QEMU_CLOCK_REALTIME, SCALE_MS,
1543                                           iscsi_timed_check_events, iscsilun);
1544     timer_mod(iscsilun->event_timer,
1545               qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
1546 }
1547 
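/* Issue MODE SENSE(6) for all pages (0x3F) to learn whether the LUN is write
 * protected (WP bit) and whether it supports DPO/FUA, defaulting to a
 * writable LUN without DPO/FUA if the command fails. */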
1548 static void iscsi_modesense_sync(IscsiLun *iscsilun)
1549 {
1550     struct scsi_task *task;
1551     struct scsi_mode_sense *ms = NULL;
1552     iscsilun->write_protected = false;
1553     iscsilun->dpofua = false;
1554 
1555     task = iscsi_modesense6_sync(iscsilun->iscsi, iscsilun->lun,
1556                                  1, SCSI_MODESENSE_PC_CURRENT,
1557                                  0x3F, 0, 255);
1558     if (task == NULL) {
1559         error_report("iSCSI: Failed to send MODE_SENSE(6) command: %s",
1560                      iscsi_get_error(iscsilun->iscsi));
1561         goto out;
1562     }
1563 
1564     if (task->status != SCSI_STATUS_GOOD) {
1565         error_report("iSCSI: Failed MODE_SENSE(6), LUN assumed writable");
1566         goto out;
1567     }
1568     ms = scsi_datain_unmarshall(task);
1569     if (!ms) {
1570         error_report("iSCSI: Failed to unmarshall MODE_SENSE(6) data: %s",
1571                      iscsi_get_error(iscsilun->iscsi));
1572         goto out;
1573     }
1574     iscsilun->write_protected = ms->device_specific_parameter & 0x80;
1575     iscsilun->dpofua          = ms->device_specific_parameter & 0x10;
1576 
1577 out:
1578     if (task) {
1579         scsi_free_scsi_task(task);
1580     }
1581 }
1582 
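/*
 * Merge the legacy -iscsi option group that matches this target (falling
 * back to the first, unnamed group) into the block driver options.  The
 * values are only set as defaults, so explicitly given blockdev options
 * keep precedence.
 */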
1583 static void iscsi_parse_iscsi_option(const char *target, QDict *options)
1584 {
1585     QemuOptsList *list;
1586     QemuOpts *opts;
1587     const char *user, *password, *password_secret, *initiator_name,
1588                *header_digest, *timeout;
1589 
1590     list = qemu_find_opts("iscsi");
1591     if (!list) {
1592         return;
1593     }
1594 
1595     opts = qemu_opts_find(list, target);
1596     if (opts == NULL) {
1597         opts = QTAILQ_FIRST(&list->head);
1598         if (!opts) {
1599             return;
1600         }
1601     }
1602 
1603     user = qemu_opt_get(opts, "user");
1604     if (user) {
1605         qdict_set_default_str(options, "user", user);
1606     }
1607 
1608     password = qemu_opt_get(opts, "password");
1609     if (password) {
1610         qdict_set_default_str(options, "password", password);
1611     }
1612 
1613     password_secret = qemu_opt_get(opts, "password-secret");
1614     if (password_secret) {
1615         qdict_set_default_str(options, "password-secret", password_secret);
1616     }
1617 
1618     initiator_name = qemu_opt_get(opts, "initiator-name");
1619     if (initiator_name) {
1620         qdict_set_default_str(options, "initiator-name", initiator_name);
1621     }
1622 
1623     header_digest = qemu_opt_get(opts, "header-digest");
1624     if (header_digest) {
1625         /* -iscsi takes upper case values, but QAPI only supports lower case
1626          * enum constant names, so we have to convert here. */
1627         char *qapi_value = g_ascii_strdown(header_digest, -1);
1628         qdict_set_default_str(options, "header-digest", qapi_value);
1629         g_free(qapi_value);
1630     }
1631 
1632     timeout = qemu_opt_get(opts, "timeout");
1633     if (timeout) {
1634         qdict_set_default_str(options, "timeout", timeout);
1635     }
1636 }
1637 
1638 /*
1639  * We support iSCSI URLs of the form
1640  * iscsi://[<username>%<password>@]<host>[:<port>]/<targetname>/<lun>
1641  */
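/*
 * Example (hypothetical target and credentials):
 *   iscsi://chapuser%secret@192.0.2.10:3260/iqn.2001-04.com.example:disk1/0
 */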
1642 static void iscsi_parse_filename(const char *filename, QDict *options,
1643                                  Error **errp)
1644 {
1645     struct iscsi_url *iscsi_url;
1646     const char *transport_name;
1647     char *lun_str;
1648 
1649     iscsi_url = iscsi_parse_full_url(NULL, filename);
1650     if (iscsi_url == NULL) {
1651         error_setg(errp, "Failed to parse URL: %s", filename);
1652         return;
1653     }
1654 
1655 #if LIBISCSI_API_VERSION >= (20160603)
1656     switch (iscsi_url->transport) {
1657     case TCP_TRANSPORT:
1658         transport_name = "tcp";
1659         break;
1660     case ISER_TRANSPORT:
1661         transport_name = "iser";
1662         break;
1663     default:
1664         error_setg(errp, "Unknown transport type (%d)", iscsi_url->transport);
1665         iscsi_destroy_url(iscsi_url);
1666         return;
1667     }
1668 #else
1669     transport_name = "tcp";
1670 #endif
1671 
1672     qdict_set_default_str(options, "transport", transport_name);
1673     qdict_set_default_str(options, "portal", iscsi_url->portal);
1674     qdict_set_default_str(options, "target", iscsi_url->target);
1675 
1676     lun_str = g_strdup_printf("%d", iscsi_url->lun);
1677     qdict_set_default_str(options, "lun", lun_str);
1678     g_free(lun_str);
1679 
1680     /* User/password from -iscsi take precedence over those from the URL */
1681     iscsi_parse_iscsi_option(iscsi_url->target, options);
1682 
1683     if (iscsi_url->user[0] != '\0') {
1684         qdict_set_default_str(options, "user", iscsi_url->user);
1685         qdict_set_default_str(options, "password", iscsi_url->passwd);
1686     }
1687 
1688     iscsi_destroy_url(iscsi_url);
1689 }
1690 
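/*
 * Options accepted by iscsi_open(); they mirror the URL components plus
 * the legacy -iscsi settings merged in by iscsi_parse_iscsi_option().
 */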
1691 static QemuOptsList runtime_opts = {
1692     .name = "iscsi",
1693     .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
1694     .desc = {
1695         {
1696             .name = "transport",
1697             .type = QEMU_OPT_STRING,
1698         },
1699         {
1700             .name = "portal",
1701             .type = QEMU_OPT_STRING,
1702         },
1703         {
1704             .name = "target",
1705             .type = QEMU_OPT_STRING,
1706         },
1707         {
1708             .name = "user",
1709             .type = QEMU_OPT_STRING,
1710         },
1711         {
1712             .name = "password",
1713             .type = QEMU_OPT_STRING,
1714         },
1715         {
1716             .name = "password-secret",
1717             .type = QEMU_OPT_STRING,
1718         },
1719         {
1720             .name = "lun",
1721             .type = QEMU_OPT_NUMBER,
1722         },
1723         {
1724             .name = "initiator-name",
1725             .type = QEMU_OPT_STRING,
1726         },
1727         {
1728             .name = "header-digest",
1729             .type = QEMU_OPT_STRING,
1730         },
1731         {
1732             .name = "timeout",
1733             .type = QEMU_OPT_NUMBER,
1734         },
1735         { /* end of list */ }
1736     },
1737 };
1738 
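/*
 * Open an iSCSI LUN: parse the runtime options, create and log in an
 * iSCSI session, then probe the LUN with INQUIRY, MODE SENSE(6) and
 * READ CAPACITY, caching the relevant VPD pages (logical block
 * provisioning and block limits) in the IscsiLun state.
 */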
1739 static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
1740                       Error **errp)
1741 {
1742     IscsiLun *iscsilun = bs->opaque;
1743     struct iscsi_context *iscsi = NULL;
1744     struct scsi_task *task = NULL;
1745     struct scsi_inquiry_standard *inq = NULL;
1746     struct scsi_inquiry_supported_pages *inq_vpd;
1747     char *initiator_name = NULL;
1748     QemuOpts *opts;
1749     Error *local_err = NULL;
1750     const char *transport_name, *portal, *target;
1751 #if LIBISCSI_API_VERSION >= (20160603)
1752     enum iscsi_transport_type transport;
1753 #endif
1754     int i, ret = 0, timeout = 0, lun;
1755 
1756     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
1757     qemu_opts_absorb_qdict(opts, options, &local_err);
1758     if (local_err) {
1759         error_propagate(errp, local_err);
1760         ret = -EINVAL;
1761         goto out;
1762     }
1763 
1764     transport_name = qemu_opt_get(opts, "transport");
1765     portal = qemu_opt_get(opts, "portal");
1766     target = qemu_opt_get(opts, "target");
1767     lun = qemu_opt_get_number(opts, "lun", 0);
1768 
1769     if (!transport_name || !portal || !target) {
1770         error_setg(errp, "Need all of transport, portal and target options");
1771         ret = -EINVAL;
1772         goto out;
1773     }
1774 
1775     if (!strcmp(transport_name, "tcp")) {
1776 #if LIBISCSI_API_VERSION >= (20160603)
1777         transport = TCP_TRANSPORT;
1778     } else if (!strcmp(transport_name, "iser")) {
1779         transport = ISER_TRANSPORT;
1780 #else
1781         /* TCP is what older libiscsi versions always use */
1782 #endif
1783     } else {
1784         error_setg(errp, "Unknown transport: %s", transport_name);
1785         ret = -EINVAL;
1786         goto out;
1787     }
1788 
1789     memset(iscsilun, 0, sizeof(IscsiLun));
1790 
1791     initiator_name = get_initiator_name(opts);
1792 
1793     iscsi = iscsi_create_context(initiator_name);
1794     if (iscsi == NULL) {
1795         error_setg(errp, "iSCSI: Failed to create iSCSI context.");
1796         ret = -ENOMEM;
1797         goto out;
1798     }
1799 #if LIBISCSI_API_VERSION >= (20160603)
1800     if (iscsi_init_transport(iscsi, transport)) {
1801         error_setg(errp, "Error initializing transport.");
1802         ret = -EINVAL;
1803         goto out;
1804     }
1805 #endif
1806     if (iscsi_set_targetname(iscsi, target)) {
1807         error_setg(errp, "iSCSI: Failed to set target name.");
1808         ret = -EINVAL;
1809         goto out;
1810     }
1811 
1812     /* check if we got CHAP username/password via the options */
1813     apply_chap(iscsi, opts, &local_err);
1814     if (local_err != NULL) {
1815         error_propagate(errp, local_err);
1816         ret = -EINVAL;
1817         goto out;
1818     }
1819 
1820     if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) {
1821         error_setg(errp, "iSCSI: Failed to set session type to normal.");
1822         ret = -EINVAL;
1823         goto out;
1824     }
1825 
1826     /* check if we got HEADER_DIGEST via the options */
1827     apply_header_digest(iscsi, opts, &local_err);
1828     if (local_err != NULL) {
1829         error_propagate(errp, local_err);
1830         ret = -EINVAL;
1831         goto out;
1832     }
1833 
1834     /* timeout handling is broken in libiscsi before 1.15.0 */
1835     timeout = qemu_opt_get_number(opts, "timeout", 0);
1836 #if LIBISCSI_API_VERSION >= 20150621
1837     iscsi_set_timeout(iscsi, timeout);
1838 #else
1839     if (timeout) {
1840         error_report("iSCSI: ignoring timeout value for libiscsi <1.15.0");
1841     }
1842 #endif
1843 
1844     if (iscsi_full_connect_sync(iscsi, portal, lun) != 0) {
1845         error_setg(errp, "iSCSI: Failed to connect to LUN: %s",
1846             iscsi_get_error(iscsi));
1847         ret = -EINVAL;
1848         goto out;
1849     }
1850 
1851     iscsilun->iscsi = iscsi;
1852     iscsilun->aio_context = bdrv_get_aio_context(bs);
1853     iscsilun->lun = lun;
1854     iscsilun->has_write_same = true;
1855 
1856     task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 0, 0,
1857                             (void **) &inq, errp);
1858     if (task == NULL) {
1859         ret = -EINVAL;
1860         goto out;
1861     }
1862     iscsilun->type = inq->periperal_device_type;
1863     scsi_free_scsi_task(task);
1864     task = NULL;
1865 
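    /* MODE SENSE tells us whether the LUN is write protected and whether it
     * honours the DPO/FUA bits; native FUA writes are only advertised to the
     * block layer when the LUN reports DPOFUA. */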
1866     iscsi_modesense_sync(iscsilun);
1867     if (iscsilun->dpofua) {
1868         bs->supported_write_flags = BDRV_REQ_FUA;
1869     }
1870     bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP;
1871 
1872     /* Check the write protect flag of the LUN if we want to write */
1873     if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
1874         iscsilun->write_protected) {
1875         error_setg(errp, "Cannot open a write protected LUN as read-write");
1876         ret = -EACCES;
1877         goto out;
1878     }
1879 
1880     iscsi_readcapacity_sync(iscsilun, &local_err);
1881     if (local_err != NULL) {
1882         error_propagate(errp, local_err);
1883         ret = -EINVAL;
1884         goto out;
1885     }
1886     bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
1887 
1888     /* We have no emulation for devices other than disks and CD-ROMs, so
1889      * such LUNs must be accessed via SG_IO ioctls. Mark the device as sg so
1890      * that QEMU does not try to read from it to guess the image format.
1891      */
1892     if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) {
1893         bs->sg = true;
1894     }
1895 
1896     task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
1897                             SCSI_INQUIRY_PAGECODE_SUPPORTED_VPD_PAGES,
1898                             (void **) &inq_vpd, errp);
1899     if (task == NULL) {
1900         ret = -EINVAL;
1901         goto out;
1902     }
1903     for (i = 0; i < inq_vpd->num_pages; i++) {
1904         struct scsi_task *inq_task;
1905         struct scsi_inquiry_logical_block_provisioning *inq_lbp;
1906         struct scsi_inquiry_block_limits *inq_bl;
1907         switch (inq_vpd->pages[i]) {
1908         case SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING:
1909             inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
1910                                         SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING,
1911                                         (void **) &inq_lbp, errp);
1912             if (inq_task == NULL) {
1913                 ret = -EINVAL;
1914                 goto out;
1915             }
1916             memcpy(&iscsilun->lbp, inq_lbp,
1917                    sizeof(struct scsi_inquiry_logical_block_provisioning));
1918             scsi_free_scsi_task(inq_task);
1919             break;
1920         case SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS:
1921             inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
1922                                     SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS,
1923                                     (void **) &inq_bl, errp);
1924             if (inq_task == NULL) {
1925                 ret = -EINVAL;
1926                 goto out;
1927             }
1928             memcpy(&iscsilun->bl, inq_bl,
1929                    sizeof(struct scsi_inquiry_block_limits));
1930             scsi_free_scsi_task(inq_task);
1931             break;
1932         default:
1933             break;
1934         }
1935     }
1936     scsi_free_scsi_task(task);
1937     task = NULL;
1938 
1939     qemu_mutex_init(&iscsilun->mutex);
1940     iscsi_attach_aio_context(bs, iscsilun->aio_context);
1941 
1942     /* Guess the internal cluster (page) size of the iSCSI target from
1943      * opt_unmap_gran. Adopt the unmap granularity only if it has a
1944      * reasonable size (between 4k and 16M). */
1945     if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 4 * 1024 &&
1946         iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
1947         iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
1948                                      iscsilun->block_size) >> BDRV_SECTOR_BITS;
1949         if (iscsilun->lbprz) {
1950             ret = iscsi_allocmap_init(iscsilun, bs->open_flags);
1951         }
1952     }
1953 
1954 out:
1955     qemu_opts_del(opts);
1956     g_free(initiator_name);
1957     if (task != NULL) {
1958         scsi_free_scsi_task(task);
1959     }
1960 
1961     if (ret) {
1962         if (iscsi != NULL) {
1963             if (iscsi_is_logged_in(iscsi)) {
1964                 iscsi_logout_sync(iscsi);
1965             }
1966             iscsi_destroy_context(iscsi);
1967         }
1968         memset(iscsilun, 0, sizeof(IscsiLun));
1969     }
1970     return ret;
1971 }
1972 
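/*
 * Close the LUN: detach from the AioContext, log out of the target if we
 * are still logged in, destroy the libiscsi context and release the
 * per-LUN resources (zero block buffer, allocation maps, mutex).
 */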
1973 static void iscsi_close(BlockDriverState *bs)
1974 {
1975     IscsiLun *iscsilun = bs->opaque;
1976     struct iscsi_context *iscsi = iscsilun->iscsi;
1977 
1978     iscsi_detach_aio_context(bs);
1979     if (iscsi_is_logged_in(iscsi)) {
1980         iscsi_logout_sync(iscsi);
1981     }
1982     iscsi_destroy_context(iscsi);
1983     g_free(iscsilun->zeroblock);
1984     iscsi_allocmap_free(iscsilun);
1985     qemu_mutex_destroy(&iscsilun->mutex);
1986     memset(iscsilun, 0, sizeof(IscsiLun));
1987 }
1988 
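/*
 * Translate the cached SCSI Block Limits VPD values (counted in logical
 * blocks) into the byte-granularity limits used by the QEMU block layer.
 */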
1989 static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
1990 {
1991     /* We don't actually refresh here, but just return data queried in
1992      * iscsi_open(): iscsi targets don't change their limits. */
1993 
1994     IscsiLun *iscsilun = bs->opaque;
1995     uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
1996     unsigned int block_size = MAX(BDRV_SECTOR_SIZE, iscsilun->block_size);
1997 
1998     assert(iscsilun->block_size >= BDRV_SECTOR_SIZE || bs->sg);
1999 
2000     bs->bl.request_alignment = block_size;
2001 
2002     if (iscsilun->bl.max_xfer_len) {
2003         max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
2004     }
2005 
2006     if (max_xfer_len * block_size < INT_MAX) {
2007         bs->bl.max_transfer = max_xfer_len * iscsilun->block_size;
2008     }
2009 
2010     if (iscsilun->lbp.lbpu) {
2011         if (iscsilun->bl.max_unmap < 0xffffffff / block_size) {
2012             bs->bl.max_pdiscard =
2013                 iscsilun->bl.max_unmap * iscsilun->block_size;
2014         }
2015         bs->bl.pdiscard_alignment =
2016             iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
2017     } else {
2018         bs->bl.pdiscard_alignment = iscsilun->block_size;
2019     }
2020 
2021     if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) {
2022         bs->bl.max_pwrite_zeroes =
2023             iscsilun->bl.max_ws_len * iscsilun->block_size;
2024     }
2025     if (iscsilun->lbp.lbpws) {
2026         bs->bl.pwrite_zeroes_alignment =
2027             iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
2028     } else {
2029         bs->bl.pwrite_zeroes_alignment = iscsilun->block_size;
2030     }
2031     if (iscsilun->bl.opt_xfer_len &&
2032         iscsilun->bl.opt_xfer_len < INT_MAX / block_size) {
2033         bs->bl.opt_transfer = pow2floor(iscsilun->bl.opt_xfer_len *
2034                                         iscsilun->block_size);
2035     }
2036 }
2037 
2038 /* Note that this will not re-establish a connection with an iSCSI target - it
2039  * is effectively a NOP.  */
2040 static int iscsi_reopen_prepare(BDRVReopenState *state,
2041                                 BlockReopenQueue *queue, Error **errp)
2042 {
2043     IscsiLun *iscsilun = state->bs->opaque;
2044 
2045     if (state->flags & BDRV_O_RDWR && iscsilun->write_protected) {
2046         error_setg(errp, "Cannot open a write protected LUN as read-write");
2047         return -EACCES;
2048     }
2049     return 0;
2050 }
2051 
2052 static void iscsi_reopen_commit(BDRVReopenState *reopen_state)
2053 {
2054     IscsiLun *iscsilun = reopen_state->bs->opaque;
2055 
2056     /* the cache.direct status might have changed */
2057     if (iscsilun->allocmap != NULL) {
2058         iscsi_allocmap_init(iscsilun, reopen_state->flags);
2059     }
2060 }
2061 
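/*
 * Resizing cannot change the LUN itself: the capacity is re-read from the
 * target and any attempt to grow beyond it is rejected.  On success the
 * allocation map is re-initialized; no SCSI command is sent.
 */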
2062 static int iscsi_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
2063 {
2064     IscsiLun *iscsilun = bs->opaque;
2065     Error *local_err = NULL;
2066 
2067     if (iscsilun->type != TYPE_DISK) {
2068         error_setg(errp, "Cannot resize non-disk iSCSI devices");
2069         return -ENOTSUP;
2070     }
2071 
2072     iscsi_readcapacity_sync(iscsilun, &local_err);
2073     if (local_err != NULL) {
2074         error_propagate(errp, local_err);
2075         return -EIO;
2076     }
2077 
2078     if (offset > iscsi_getlength(bs)) {
2079         error_setg(errp, "Cannot grow iSCSI devices");
2080         return -EINVAL;
2081     }
2082 
2083     if (iscsilun->allocmap != NULL) {
2084         iscsi_allocmap_init(iscsilun, bs->open_flags);
2085     }
2086 
2087     return 0;
2088 }
2089 
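/*
 * "Creating" an iSCSI image cannot allocate anything on the target; it
 * merely opens the LUN named by the URL and checks that it is a disk at
 * least as large as the requested virtual size.
 */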
2090 static int iscsi_create(const char *filename, QemuOpts *opts, Error **errp)
2091 {
2092     int ret = 0;
2093     int64_t total_size = 0;
2094     BlockDriverState *bs;
2095     IscsiLun *iscsilun = NULL;
2096     QDict *bs_options;
2097     Error *local_err = NULL;
2098 
2099     bs = bdrv_new();
2100 
2101     /* Read out options */
2102     total_size = DIV_ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
2103                               BDRV_SECTOR_SIZE);
2104     bs->opaque = g_new0(struct IscsiLun, 1);
2105     iscsilun = bs->opaque;
2106 
2107     bs_options = qdict_new();
2108     iscsi_parse_filename(filename, bs_options, &local_err);
2109     if (local_err) {
2110         error_propagate(errp, local_err);
2111         ret = -EINVAL;
2112     } else {
2113         ret = iscsi_open(bs, bs_options, 0, NULL);
2114     }
2115     QDECREF(bs_options);
2116 
2117     if (ret != 0) {
2118         goto out;
2119     }
2120     iscsi_detach_aio_context(bs);
2121     if (iscsilun->type != TYPE_DISK) {
2122         ret = -ENODEV;
2123         goto out;
2124     }
2125     if (bs->total_sectors < total_size) {
2126         ret = -ENOSPC;
2127         goto out;
2128     }
2129 
2130     ret = 0;
2131 out:
2132     if (iscsilun->iscsi != NULL) {
2133         iscsi_destroy_context(iscsilun->iscsi);
2134     }
2135     g_free(bs->opaque);
2136     bs->opaque = NULL;
2137     bdrv_unref(bs);
2138     return ret;
2139 }
2140 
2141 static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2142 {
2143     IscsiLun *iscsilun = bs->opaque;
2144     bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
2145     bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws;
2146     bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE;
2147     return 0;
2148 }
2149 
2150 static void iscsi_invalidate_cache(BlockDriverState *bs,
2151                                    Error **errp)
2152 {
2153     IscsiLun *iscsilun = bs->opaque;
2154     iscsi_allocmap_invalidate(iscsilun);
2155 }
2156 
2157 static QemuOptsList iscsi_create_opts = {
2158     .name = "iscsi-create-opts",
2159     .head = QTAILQ_HEAD_INITIALIZER(iscsi_create_opts.head),
2160     .desc = {
2161         {
2162             .name = BLOCK_OPT_SIZE,
2163             .type = QEMU_OPT_SIZE,
2164             .help = "Virtual disk size"
2165         },
2166         { /* end of list */ }
2167     }
2168 };
2169 
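/* BlockDriver callbacks registered for the iscsi:// protocol. */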
2170 static BlockDriver bdrv_iscsi = {
2171     .format_name     = "iscsi",
2172     .protocol_name   = "iscsi",
2173 
2174     .instance_size          = sizeof(IscsiLun),
2175     .bdrv_parse_filename    = iscsi_parse_filename,
2176     .bdrv_file_open         = iscsi_open,
2177     .bdrv_close             = iscsi_close,
2178     .bdrv_create            = iscsi_create,
2179     .create_opts            = &iscsi_create_opts,
2180     .bdrv_reopen_prepare    = iscsi_reopen_prepare,
2181     .bdrv_reopen_commit     = iscsi_reopen_commit,
2182     .bdrv_invalidate_cache  = iscsi_invalidate_cache,
2183 
2184     .bdrv_getlength  = iscsi_getlength,
2185     .bdrv_get_info   = iscsi_get_info,
2186     .bdrv_truncate   = iscsi_truncate,
2187     .bdrv_refresh_limits = iscsi_refresh_limits,
2188 
2189     .bdrv_co_get_block_status = iscsi_co_get_block_status,
2190     .bdrv_co_pdiscard      = iscsi_co_pdiscard,
2191     .bdrv_co_pwrite_zeroes = iscsi_co_pwrite_zeroes,
2192     .bdrv_co_readv         = iscsi_co_readv,
2193     .bdrv_co_writev_flags  = iscsi_co_writev_flags,
2194     .bdrv_co_flush_to_disk = iscsi_co_flush,
2195 
2196 #ifdef __linux__
2197     .bdrv_aio_ioctl   = iscsi_aio_ioctl,
2198 #endif
2199 
2200     .bdrv_detach_aio_context = iscsi_detach_aio_context,
2201     .bdrv_attach_aio_context = iscsi_attach_aio_context,
2202 };
2203 
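/* With a recent enough libiscsi the same callbacks are also registered
 * under the iser:// protocol name for iSCSI over RDMA (iSER). */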
2204 #if LIBISCSI_API_VERSION >= (20160603)
2205 static BlockDriver bdrv_iser = {
2206     .format_name     = "iser",
2207     .protocol_name   = "iser",
2208 
2209     .instance_size          = sizeof(IscsiLun),
2210     .bdrv_parse_filename    = iscsi_parse_filename,
2211     .bdrv_file_open         = iscsi_open,
2212     .bdrv_close             = iscsi_close,
2213     .bdrv_create            = iscsi_create,
2214     .create_opts            = &iscsi_create_opts,
2215     .bdrv_reopen_prepare    = iscsi_reopen_prepare,
2216     .bdrv_reopen_commit     = iscsi_reopen_commit,
2217     .bdrv_invalidate_cache  = iscsi_invalidate_cache,
2218 
2219     .bdrv_getlength  = iscsi_getlength,
2220     .bdrv_get_info   = iscsi_get_info,
2221     .bdrv_truncate   = iscsi_truncate,
2222     .bdrv_refresh_limits = iscsi_refresh_limits,
2223 
2224     .bdrv_co_get_block_status = iscsi_co_get_block_status,
2225     .bdrv_co_pdiscard      = iscsi_co_pdiscard,
2226     .bdrv_co_pwrite_zeroes = iscsi_co_pwrite_zeroes,
2227     .bdrv_co_readv         = iscsi_co_readv,
2228     .bdrv_co_writev_flags  = iscsi_co_writev_flags,
2229     .bdrv_co_flush_to_disk = iscsi_co_flush,
2230 
2231 #ifdef __linux__
2232     .bdrv_aio_ioctl   = iscsi_aio_ioctl,
2233 #endif
2234 
2235     .bdrv_detach_aio_context = iscsi_detach_aio_context,
2236     .bdrv_attach_aio_context = iscsi_attach_aio_context,
2237 };
2238 #endif
2239 
2240 static void iscsi_block_init(void)
2241 {
2242     bdrv_register(&bdrv_iscsi);
2243 #if LIBISCSI_API_VERSION >= (20160603)
2244     bdrv_register(&bdrv_iser);
2245 #endif
2246 }
2247 
2248 block_init(iscsi_block_init);
2249