xref: /openbmc/qemu/hw/ide/core.c (revision 86d063fa)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/irq.h"
28 #include "hw/isa/isa.h"
29 #include "migration/vmstate.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/timer.h"
33 #include "qemu/hw-version.h"
34 #include "qemu/memalign.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/blockdev.h"
37 #include "sysemu/dma.h"
38 #include "hw/block/block.h"
39 #include "sysemu/block-backend.h"
40 #include "qapi/error.h"
41 #include "qemu/cutils.h"
42 #include "sysemu/replay.h"
43 #include "sysemu/runstate.h"
44 #include "hw/ide/internal.h"
45 #include "trace.h"
46 
47 /* These values were based on a Seagate ST3500418AS but have been modified
48    to make more sense in QEMU */
49 static const int smart_attributes[][12] = {
50     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
51     /* raw read error rate*/
52     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
53     /* spin up */
54     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55     /* start stop count */
56     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
57     /* remapped sectors */
58     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
59     /* power on hours */
60     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
61     /* power cycle count */
62     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
63     /* airflow-temperature-celsius */
64     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
65 };
66 
67 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
68     [IDE_DMA_READ] = "DMA READ",
69     [IDE_DMA_WRITE] = "DMA WRITE",
70     [IDE_DMA_TRIM] = "DMA TRIM",
71     [IDE_DMA_ATAPI] = "DMA ATAPI"
72 };
73 
74 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
75 {
76     if ((unsigned)enval < IDE_DMA__COUNT) {
77         return IDE_DMA_CMD_lookup[enval];
78     }
79     return "DMA UNKNOWN CMD";
80 }
81 
82 static void ide_dummy_transfer_stop(IDEState *s);
83 
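/*
 * Copy an ASCII string into an ATA IDENTIFY string field, padding with
 * spaces.  ATA strings pack two characters per 16-bit word with the first
 * character in the upper byte; since the identify buffer is stored as
 * little-endian 16-bit words, the two bytes of each pair are swapped
 * here (str[i ^ 1]).
 */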
static void padstr(char *str, const char *src, int len)
{
    int i, v;

    for (i = 0; i < len; i++) {
        if (*src) {
            v = *src++;
        } else {
            v = ' ';
        }
        str[i ^ 1] = v;
    }
}
95 
96 static void put_le16(uint16_t *p, unsigned int v)
97 {
98     *p = cpu_to_le16(v);
99 }
100 
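/*
 * Fill in the capacity-dependent IDENTIFY words: words 60-61 hold the
 * LBA28-addressable sector count (clamped to 0x0FFFFFFF), words 100-103
 * hold the full LBA48 sector count.
 */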
101 static void ide_identify_size(IDEState *s)
102 {
103     uint16_t *p = (uint16_t *)s->identify_data;
104     int64_t nb_sectors_lba28 = s->nb_sectors;
105     if (nb_sectors_lba28 >= 1 << 28) {
106         nb_sectors_lba28 = (1 << 28) - 1;
107     }
108     put_le16(p + 60, nb_sectors_lba28);
109     put_le16(p + 61, nb_sectors_lba28 >> 16);
110     put_le16(p + 100, s->nb_sectors);
111     put_le16(p + 101, s->nb_sectors >> 16);
112     put_le16(p + 102, s->nb_sectors >> 32);
113     put_le16(p + 103, s->nb_sectors >> 48);
114 }
115 
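/*
 * Build the 512-byte IDENTIFY DEVICE response for an ATA disk.  The data
 * is generated once and cached in s->identify_data (identify_set); each
 * call copies the cached words into the PIO io_buffer.
 */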
116 static void ide_identify(IDEState *s)
117 {
118     uint16_t *p;
119     unsigned int oldsize;
120     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
121 
122     p = (uint16_t *)s->identify_data;
123     if (s->identify_set) {
124         goto fill_buffer;
125     }
126     memset(p, 0, sizeof(s->identify_data));
127 
128     put_le16(p + 0, 0x0040);
129     put_le16(p + 1, s->cylinders);
130     put_le16(p + 3, s->heads);
131     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
132     put_le16(p + 5, 512); /* XXX: retired, remove ? */
133     put_le16(p + 6, s->sectors);
134     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
135     put_le16(p + 20, 3); /* XXX: retired, remove ? */
136     put_le16(p + 21, 512); /* cache size in sectors */
137     put_le16(p + 22, 4); /* ecc bytes */
138     padstr((char *)(p + 23), s->version, 8); /* firmware version */
139     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
140 #if MAX_MULT_SECTORS > 1
141     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
142 #endif
143     put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
145     put_le16(p + 51, 0x200); /* PIO transfer cycle */
146     put_le16(p + 52, 0x200); /* DMA transfer cycle */
147     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
148     put_le16(p + 54, s->cylinders);
149     put_le16(p + 55, s->heads);
150     put_le16(p + 56, s->sectors);
151     oldsize = s->cylinders * s->heads * s->sectors;
152     put_le16(p + 57, oldsize);
153     put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors) {
        put_le16(p + 59, 0x100 | s->mult_sectors);
    }
156     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
157     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
158     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
159     put_le16(p + 63, 0x07); /* mdma0-2 supported */
160     put_le16(p + 64, 0x03); /* pio3-4 supported */
161     put_le16(p + 65, 120);
162     put_le16(p + 66, 120);
163     put_le16(p + 67, 120);
164     put_le16(p + 68, 120);
165     if (dev && dev->conf.discard_granularity) {
166         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
167     }
168 
169     if (s->ncq_queues) {
170         put_le16(p + 75, s->ncq_queues - 1);
171         /* NCQ supported */
172         put_le16(p + 76, (1 << 8));
173     }
174 
175     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
176     put_le16(p + 81, 0x16); /* conforms to ata5 */
177     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
178     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
179     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
181     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
182     if (s->wwn) {
183         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
184     } else {
185         put_le16(p + 84, (1 << 14) | 0);
186     }
187     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
188     if (blk_enable_write_cache(s->blk)) {
189         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
190     } else {
191         put_le16(p + 85, (1 << 14) | 1);
192     }
193     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
195     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
196     if (s->wwn) {
197         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
198     } else {
199         put_le16(p + 87, (1 << 14) | 0);
200     }
201     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
202     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
203     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
204     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
205     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
206     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
207 
    if (dev && dev->conf.physical_block_size) {
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    }
210     if (s->wwn) {
211         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
212         put_le16(p + 108, s->wwn >> 48);
213         put_le16(p + 109, s->wwn >> 32);
214         put_le16(p + 110, s->wwn >> 16);
215         put_le16(p + 111, s->wwn);
216     }
217     if (dev && dev->conf.discard_granularity) {
218         put_le16(p + 169, 1); /* TRIM support */
219     }
220     if (dev) {
221         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
222     }
223 
224     ide_identify_size(s);
225     s->identify_set = 1;
226 
227 fill_buffer:
228     memcpy(s->io_buffer, p, sizeof(s->identify_data));
229 }
230 
231 static void ide_atapi_identify(IDEState *s)
232 {
233     uint16_t *p;
234 
235     p = (uint16_t *)s->identify_data;
236     if (s->identify_set) {
237         goto fill_buffer;
238     }
239     memset(p, 0, sizeof(s->identify_data));
240 
241     /* Removable CDROM, 50us response, 12 byte packets */
242     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
243     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
244     put_le16(p + 20, 3); /* buffer type */
245     put_le16(p + 21, 512); /* cache size in sectors */
246     put_le16(p + 22, 4); /* ecc bytes */
247     padstr((char *)(p + 23), s->version, 8); /* firmware version */
248     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
249     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
250 #ifdef USE_DMA_CDROM
251     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
252     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
253     put_le16(p + 62, 7);  /* single word dma0-2 supported */
254     put_le16(p + 63, 7);  /* mdma0-2 supported */
255 #else
256     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
257     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
258     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
259 #endif
260     put_le16(p + 64, 3); /* pio3-4 supported */
261     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
262     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
263     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
264     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
265 
266     put_le16(p + 71, 30); /* in ns */
267     put_le16(p + 72, 30); /* in ns */
268 
269     if (s->ncq_queues) {
270         put_le16(p + 75, s->ncq_queues - 1);
271         /* NCQ supported */
272         put_le16(p + 76, (1 << 8));
273     }
274 
275     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
276     if (s->wwn) {
277         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
278         put_le16(p + 87, (1 << 8)); /* WWN enabled */
279     }
280 
281 #ifdef USE_DMA_CDROM
282     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
283 #endif
284 
285     if (s->wwn) {
286         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
287         put_le16(p + 108, s->wwn >> 48);
288         put_le16(p + 109, s->wwn >> 32);
289         put_le16(p + 110, s->wwn >> 16);
290         put_le16(p + 111, s->wwn);
291     }
292 
293     s->identify_set = 1;
294 
295 fill_buffer:
296     memcpy(s->io_buffer, p, sizeof(s->identify_data));
297 }
298 
299 static void ide_cfata_identify_size(IDEState *s)
300 {
301     uint16_t *p = (uint16_t *)s->identify_data;
302     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
303     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
304     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
305     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
306 }
307 
308 static void ide_cfata_identify(IDEState *s)
309 {
310     uint16_t *p;
311     uint32_t cur_sec;
312 
313     p = (uint16_t *)s->identify_data;
314     if (s->identify_set) {
315         goto fill_buffer;
316     }
317     memset(p, 0, sizeof(s->identify_data));
318 
319     cur_sec = s->cylinders * s->heads * s->sectors;
320 
321     put_le16(p + 0, 0x848a);                    /* CF Storage Card signature */
322     put_le16(p + 1, s->cylinders);              /* Default cylinders */
323     put_le16(p + 3, s->heads);                  /* Default heads */
324     put_le16(p + 6, s->sectors);                /* Default sectors per track */
325     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
326     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
327     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
328     put_le16(p + 22, 0x0004);                   /* ECC bytes */
329     padstr((char *) (p + 23), s->version, 8);   /* Firmware Revision */
330     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
331 #if MAX_MULT_SECTORS > 1
332     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
333 #else
334     put_le16(p + 47, 0x0000);
335 #endif
336     put_le16(p + 49, 0x0f00);                   /* Capabilities */
337     put_le16(p + 51, 0x0002);                   /* PIO cycle timing mode */
338     put_le16(p + 52, 0x0001);                   /* DMA cycle timing mode */
339     put_le16(p + 53, 0x0003);                   /* Translation params valid */
340     put_le16(p + 54, s->cylinders);             /* Current cylinders */
341     put_le16(p + 55, s->heads);                 /* Current heads */
342     put_le16(p + 56, s->sectors);               /* Current sectors */
343     put_le16(p + 57, cur_sec);                  /* Current capacity */
344     put_le16(p + 58, cur_sec >> 16);            /* Current capacity */
    if (s->mult_sectors) {                      /* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    }
347     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
348     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
349     put_le16(p + 63, 0x0203);                   /* Multiword DMA capability */
350     put_le16(p + 64, 0x0001);                   /* Flow Control PIO support */
351     put_le16(p + 65, 0x0096);                   /* Min. Multiword DMA cycle */
352     put_le16(p + 66, 0x0096);                   /* Rec. Multiword DMA cycle */
353     put_le16(p + 68, 0x00b4);                   /* Min. PIO cycle time */
354     put_le16(p + 82, 0x400c);                   /* Command Set supported */
355     put_le16(p + 83, 0x7068);                   /* Command Set supported */
356     put_le16(p + 84, 0x4000);                   /* Features supported */
357     put_le16(p + 85, 0x000c);                   /* Command Set enabled */
358     put_le16(p + 86, 0x7044);                   /* Command Set enabled */
359     put_le16(p + 87, 0x4000);                   /* Features enabled */
360     put_le16(p + 91, 0x4060);                   /* Current APM level */
361     put_le16(p + 129, 0x0002);                  /* Current features option */
362     put_le16(p + 130, 0x0005);                  /* Reassigned sectors */
363     put_le16(p + 131, 0x0001);                  /* Initial power mode */
364     put_le16(p + 132, 0x0000);                  /* User signature */
365     put_le16(p + 160, 0x8100);                  /* Power requirement */
366     put_le16(p + 161, 0x8001);                  /* CF command set */
367 
368     ide_cfata_identify_size(s);
369     s->identify_set = 1;
370 
371 fill_buffer:
372     memcpy(s->io_buffer, p, sizeof(s->identify_data));
373 }
374 
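/*
 * Place the device signature in the task file registers: nsector and
 * sector are set to 1, and lcyl/hcyl identify the device type --
 * 0x14/0xEB for an ATAPI (PACKET) device, 0x00/0x00 for an ATA disk,
 * 0xFF/0xFF when no block backend is attached.
 */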
375 static void ide_set_signature(IDEState *s)
376 {
377     s->select &= ~(ATA_DEV_HS); /* clear head */
378     /* put signature */
379     s->nsector = 1;
380     s->sector = 1;
381     if (s->drive_kind == IDE_CD) {
382         s->lcyl = 0x14;
383         s->hcyl = 0xeb;
384     } else if (s->blk) {
385         s->lcyl = 0;
386         s->hcyl = 0;
387     } else {
388         s->lcyl = 0xff;
389         s->hcyl = 0xff;
390     }
391 }
392 
393 static bool ide_sect_range_ok(IDEState *s,
394                               uint64_t sector, uint64_t nb_sectors)
395 {
396     uint64_t total_sectors;
397 
398     blk_get_geometry(s->blk, &total_sectors);
399     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
400         return false;
401     }
402     return true;
403 }
404 
405 typedef struct TrimAIOCB {
406     BlockAIOCB common;
407     IDEState *s;
408     QEMUBH *bh;
409     int ret;
410     QEMUIOVector *qiov;
411     BlockAIOCB *aiocb;
412     int i, j;
413 } TrimAIOCB;
414 
415 static void trim_aio_cancel(BlockAIOCB *acb)
416 {
417     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
418 
419     /* Exit the loop so ide_issue_trim_cb will not continue  */
420     iocb->j = iocb->qiov->niov - 1;
421     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
422 
423     iocb->ret = -ECANCELED;
424 
425     if (iocb->aiocb) {
426         blk_aio_cancel_async(iocb->aiocb);
427         iocb->aiocb = NULL;
428     }
429 }
430 
431 static const AIOCBInfo trim_aiocb_info = {
432     .aiocb_size         = sizeof(TrimAIOCB),
433     .cancel_async       = trim_aio_cancel,
434 };
435 
436 static void ide_trim_bh_cb(void *opaque)
437 {
438     TrimAIOCB *iocb = opaque;
439     BlockBackend *blk = iocb->s->blk;
440 
441     iocb->common.cb(iocb->common.opaque, iocb->ret);
442 
443     qemu_bh_delete(iocb->bh);
444     iocb->bh = NULL;
445     qemu_aio_unref(iocb);
446 
447     /* Paired with an increment in ide_issue_trim() */
448     blk_dec_in_flight(blk);
449 }
450 
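/*
 * Completion callback for DATA SET MANAGEMENT / TRIM.  The guest-supplied
 * payload is a list of little-endian 8-byte entries, each holding a 48-bit
 * starting LBA and a 16-bit sector count.  Entries are walked one at a time
 * (iocb->j indexes the iovec, iocb->i the entry within it) and each
 * non-empty range is submitted as a separate discard; once all entries are
 * consumed or an error occurs, the bottom half completes the request.
 */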
451 static void ide_issue_trim_cb(void *opaque, int ret)
452 {
453     TrimAIOCB *iocb = opaque;
454     IDEState *s = iocb->s;
455 
456     if (iocb->i >= 0) {
457         if (ret >= 0) {
458             block_acct_done(blk_get_stats(s->blk), &s->acct);
459         } else {
460             block_acct_failed(blk_get_stats(s->blk), &s->acct);
461         }
462     }
463 
464     if (ret >= 0) {
465         while (iocb->j < iocb->qiov->niov) {
466             int j = iocb->j;
467             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
468                 int i = iocb->i;
469                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
470 
471                 /* 6-byte LBA + 2-byte range per entry */
472                 uint64_t entry = le64_to_cpu(buffer[i]);
473                 uint64_t sector = entry & 0x0000ffffffffffffULL;
474                 uint16_t count = entry >> 48;
475 
476                 if (count == 0) {
477                     continue;
478                 }
479 
480                 if (!ide_sect_range_ok(s, sector, count)) {
481                     block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
482                     iocb->ret = -EINVAL;
483                     goto done;
484                 }
485 
486                 block_acct_start(blk_get_stats(s->blk), &s->acct,
487                                  count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
488 
489                 /* Got an entry! Submit and exit.  */
490                 iocb->aiocb = blk_aio_pdiscard(s->blk,
491                                                sector << BDRV_SECTOR_BITS,
492                                                count << BDRV_SECTOR_BITS,
493                                                ide_issue_trim_cb, opaque);
494                 return;
495             }
496 
497             iocb->j++;
498             iocb->i = -1;
499         }
500     } else {
501         iocb->ret = ret;
502     }
503 
504 done:
505     iocb->aiocb = NULL;
506     if (iocb->bh) {
507         replay_bh_schedule_event(iocb->bh);
508     }
509 }
510 
511 BlockAIOCB *ide_issue_trim(
512         int64_t offset, QEMUIOVector *qiov,
513         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
514 {
515     IDEState *s = opaque;
516     TrimAIOCB *iocb;
517 
518     /* Paired with a decrement in ide_trim_bh_cb() */
519     blk_inc_in_flight(s->blk);
520 
521     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
522     iocb->s = s;
523     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
524     iocb->ret = 0;
525     iocb->qiov = qiov;
526     iocb->i = -1;
527     iocb->j = 0;
528     ide_issue_trim_cb(iocb, 0);
529     return &iocb->common;
530 }
531 
532 void ide_abort_command(IDEState *s)
533 {
534     ide_transfer_stop(s);
535     s->status = READY_STAT | ERR_STAT;
536     s->error = ABRT_ERR;
537 }
538 
539 static void ide_set_retry(IDEState *s)
540 {
541     s->bus->retry_unit = s->unit;
542     s->bus->retry_sector_num = ide_get_sector(s);
543     s->bus->retry_nsector = s->nsector;
544 }
545 
546 static void ide_clear_retry(IDEState *s)
547 {
548     s->bus->retry_unit = -1;
549     s->bus->retry_sector_num = 0;
550     s->bus->retry_nsector = 0;
551 }
552 
/*
 * Prepare a PIO data transfer and record what to do when it completes.
 * Returns true if the bus DMA ops provide a pio_transfer hook that handled
 * the transfer synchronously, in which case the caller is responsible for
 * invoking the end-transfer function itself (see ide_transfer_start()).
 */
554 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
555                                   EndTransferFunc *end_transfer_func)
556 {
557     s->data_ptr = buf;
558     s->data_end = buf + size;
559     ide_set_retry(s);
560     if (!(s->status & ERR_STAT)) {
561         s->status |= DRQ_STAT;
562     }
563     if (!s->bus->dma->ops->pio_transfer) {
564         s->end_transfer_func = end_transfer_func;
565         return false;
566     }
567     s->bus->dma->ops->pio_transfer(s->bus->dma);
568     return true;
569 }
570 
571 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
572                         EndTransferFunc *end_transfer_func)
573 {
574     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
575         end_transfer_func(s);
576     }
577 }
578 
579 static void ide_cmd_done(IDEState *s)
580 {
581     if (s->bus->dma->ops->cmd_done) {
582         s->bus->dma->ops->cmd_done(s->bus->dma);
583     }
584 }
585 
586 static void ide_transfer_halt(IDEState *s)
587 {
588     s->end_transfer_func = ide_transfer_stop;
589     s->data_ptr = s->io_buffer;
590     s->data_end = s->io_buffer;
591     s->status &= ~DRQ_STAT;
592 }
593 
594 void ide_transfer_stop(IDEState *s)
595 {
596     ide_transfer_halt(s);
597     ide_cmd_done(s);
598 }
599 
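/*
 * Decode the current address from the task file registers.  LBA48 commands
 * use the HOB registers for bits 24-47, LBA28 keeps bits 24-27 in the low
 * nibble of the device register, and CHS addressing is converted with
 * sector_num = (cyl * heads + head) * sectors + (sector - 1).
 */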
600 int64_t ide_get_sector(IDEState *s)
601 {
602     int64_t sector_num;
603     if (s->select & (ATA_DEV_LBA)) {
604         if (s->lba48) {
605             sector_num = ((int64_t)s->hob_hcyl << 40) |
606                 ((int64_t) s->hob_lcyl << 32) |
607                 ((int64_t) s->hob_sector << 24) |
608                 ((int64_t) s->hcyl << 16) |
609                 ((int64_t) s->lcyl << 8) | s->sector;
610         } else {
611             /* LBA28 */
612             sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
613                 (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
614         }
615     } else {
616         /* CHS */
617         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
618             (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
619     }
620 
621     return sector_num;
622 }
623 
624 void ide_set_sector(IDEState *s, int64_t sector_num)
625 {
626     unsigned int cyl, r;
627     if (s->select & (ATA_DEV_LBA)) {
628         if (s->lba48) {
629             s->sector = sector_num;
630             s->lcyl = sector_num >> 8;
631             s->hcyl = sector_num >> 16;
632             s->hob_sector = sector_num >> 24;
633             s->hob_lcyl = sector_num >> 32;
634             s->hob_hcyl = sector_num >> 40;
635         } else {
636             /* LBA28 */
637             s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
638                 ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
639             s->hcyl = (sector_num >> 16);
640             s->lcyl = (sector_num >> 8);
641             s->sector = (sector_num);
642         }
643     } else {
644         /* CHS */
645         cyl = sector_num / (s->heads * s->sectors);
646         r = sector_num % (s->heads * s->sectors);
647         s->hcyl = cyl >> 8;
648         s->lcyl = cyl;
649         s->select = (s->select & ~(ATA_DEV_HS)) |
650             ((r / s->sectors) & (ATA_DEV_HS));
651         s->sector = (r % s->sectors) + 1;
652     }
653 }
654 
655 static void ide_rw_error(IDEState *s) {
656     ide_abort_command(s);
657     ide_bus_set_irq(s->bus);
658 }
659 
660 static void ide_buffered_readv_cb(void *opaque, int ret)
661 {
662     IDEBufferedRequest *req = opaque;
663     if (!req->orphaned) {
664         if (!ret) {
665             assert(req->qiov.size == req->original_qiov->size);
666             qemu_iovec_from_buf(req->original_qiov, 0,
667                                 req->qiov.local_iov.iov_base,
668                                 req->original_qiov->size);
669         }
670         req->original_cb(req->original_opaque, ret);
671     }
672     QLIST_REMOVE(req, list);
673     qemu_vfree(qemu_iovec_buf(&req->qiov));
674     g_free(req);
675 }
676 
677 #define MAX_BUFFERED_REQS 16
678 
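/*
 * Read through a bounce buffer rather than directly into the caller's
 * iovec.  The data is only copied back into the original iovec from
 * ide_buffered_readv_cb() if the request has not been orphaned by
 * ide_cancel_dma_sync(), so a cancelled request can complete without
 * touching guest memory.  The number of simultaneously buffered requests
 * is capped (MAX_BUFFERED_REQS); beyond that the request fails with -EIO.
 */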
679 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
680                                QEMUIOVector *iov, int nb_sectors,
681                                BlockCompletionFunc *cb, void *opaque)
682 {
683     BlockAIOCB *aioreq;
684     IDEBufferedRequest *req;
685     int c = 0;
686 
687     QLIST_FOREACH(req, &s->buffered_requests, list) {
688         c++;
689     }
690     if (c > MAX_BUFFERED_REQS) {
691         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
692     }
693 
694     req = g_new0(IDEBufferedRequest, 1);
695     req->original_qiov = iov;
696     req->original_cb = cb;
697     req->original_opaque = opaque;
698     qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
699                         iov->size);
700 
701     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
702                             &req->qiov, 0, ide_buffered_readv_cb, req);
703 
704     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
705     return aioreq;
706 }
707 
708 /**
709  * Cancel all pending DMA requests.
710  * Any buffered DMA requests are instantly canceled,
711  * but any pending unbuffered DMA requests must be waited on.
712  */
713 void ide_cancel_dma_sync(IDEState *s)
714 {
715     IDEBufferedRequest *req;
716 
    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered requests (scatter-gather DMA requests
     * or write requests) pending, so we can avoid draining. */
721     QLIST_FOREACH(req, &s->buffered_requests, list) {
722         if (!req->orphaned) {
723             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
724             req->original_cb(req->original_opaque, -ECANCELED);
725         }
726         req->orphaned = true;
727     }
728 
    /*
     * We can't cancel scatter-gather DMA in the middle of the
     * operation, or a partial (not full) DMA transfer would reach
     * the storage, so we wait for completion instead (we behave as
     * if the DMA had already completed by the time the guest tried
     * to cancel it via bmdma_cmd_writeb with BM_CMD_START not set).
     *
     * In the future we will be able to safely cancel the I/O once
     * the whole DMA operation is submitted to disk with a single
     * aio operation using preadv/pwritev.
     */
741     if (s->bus->dma->aiocb) {
742         trace_ide_cancel_dma_sync_remaining();
743         blk_drain(s->blk);
744         assert(s->bus->dma->aiocb == NULL);
745     }
746 }
747 
748 static void ide_sector_read(IDEState *s);
749 
750 static void ide_sector_read_cb(void *opaque, int ret)
751 {
752     IDEState *s = opaque;
753     int n;
754 
755     s->pio_aiocb = NULL;
756     s->status &= ~BUSY_STAT;
757 
758     if (ret != 0) {
759         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
760                                 IDE_RETRY_READ)) {
761             return;
762         }
763     }
764 
765     block_acct_done(blk_get_stats(s->blk), &s->acct);
766 
767     n = s->nsector;
768     if (n > s->req_nb_sectors) {
769         n = s->req_nb_sectors;
770     }
771 
772     ide_set_sector(s, ide_get_sector(s) + n);
773     s->nsector -= n;
774     /* Allow the guest to read the io_buffer */
775     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
776     ide_bus_set_irq(s->bus);
777 }
778 
779 static void ide_sector_read(IDEState *s)
780 {
781     int64_t sector_num;
782     int n;
783 
784     s->status = READY_STAT | SEEK_STAT;
785     s->error = 0; /* not needed by IDE spec, but needed by Windows */
786     sector_num = ide_get_sector(s);
787     n = s->nsector;
788 
789     if (n == 0) {
790         ide_transfer_stop(s);
791         return;
792     }
793 
794     s->status |= BUSY_STAT;
795 
796     if (n > s->req_nb_sectors) {
797         n = s->req_nb_sectors;
798     }
799 
800     trace_ide_sector_read(sector_num, n);
801 
802     if (!ide_sect_range_ok(s, sector_num, n)) {
803         ide_rw_error(s);
804         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
805         return;
806     }
807 
808     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
809 
810     block_acct_start(blk_get_stats(s->blk), &s->acct,
811                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
812     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
813                                       ide_sector_read_cb, s);
814 }
815 
816 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
817 {
818     if (s->bus->dma->ops->commit_buf) {
819         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
820     }
821     s->io_buffer_offset += tx_bytes;
822     qemu_sglist_destroy(&s->sg);
823 }
824 
825 void ide_set_inactive(IDEState *s, bool more)
826 {
827     s->bus->dma->aiocb = NULL;
828     ide_clear_retry(s);
829     if (s->bus->dma->ops->set_inactive) {
830         s->bus->dma->ops->set_inactive(s->bus->dma, more);
831     }
832     ide_cmd_done(s);
833 }
834 
835 void ide_dma_error(IDEState *s)
836 {
837     dma_buf_commit(s, 0);
838     ide_abort_command(s);
839     ide_set_inactive(s, false);
840     ide_bus_set_irq(s->bus);
841 }
842 
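/*
 * Apply the drive's configured error policy to a failed request.  With
 * BLOCK_ERROR_ACTION_STOP the operation to retry is recorded in
 * bus->error_status so the command can be retried later; with
 * BLOCK_ERROR_ACTION_REPORT the error is propagated to the guest as a
 * DMA, ATAPI or PIO error.  Returns zero only if the error is ignored.
 */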
843 int ide_handle_rw_error(IDEState *s, int error, int op)
844 {
845     bool is_read = (op & IDE_RETRY_READ) != 0;
846     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
847 
848     if (action == BLOCK_ERROR_ACTION_STOP) {
849         assert(s->bus->retry_unit == s->unit);
850         s->bus->error_status = op;
851     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
852         block_acct_failed(blk_get_stats(s->blk), &s->acct);
853         if (IS_IDE_RETRY_DMA(op)) {
854             ide_dma_error(s);
855         } else if (IS_IDE_RETRY_ATAPI(op)) {
856             ide_atapi_io_error(s, -error);
857         } else {
858             ide_rw_error(s);
859         }
860     }
861     blk_error_action(s->blk, action, is_read, error);
862     return action != BLOCK_ERROR_ACTION_IGNORE;
863 }
864 
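/*
 * Completion callback for ATA DMA transfers.  Each pass commits the bytes
 * covered by the current scatter/gather list, advances the LBA and sector
 * count, then maps the next run of PRDs via prepare_buf() and resubmits
 * the read/write/trim until nsector reaches zero.  PRDs that are longer
 * than the request leave the Active bit set; PRDs that are too short end
 * the command without raising an interrupt.
 */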
865 static void ide_dma_cb(void *opaque, int ret)
866 {
867     IDEState *s = opaque;
868     int n;
869     int64_t sector_num;
870     uint64_t offset;
871     bool stay_active = false;
872     int32_t prep_size = 0;
873 
874     if (ret == -EINVAL) {
875         ide_dma_error(s);
876         return;
877     }
878 
879     if (ret < 0) {
880         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
881             s->bus->dma->aiocb = NULL;
882             dma_buf_commit(s, 0);
883             return;
884         }
885     }
886 
887     if (s->io_buffer_size > s->nsector * 512) {
888         /*
889          * The PRDs were longer than needed for this request.
890          * The Active bit must remain set after the request completes.
891          */
892         n = s->nsector;
893         stay_active = true;
894     } else {
895         n = s->io_buffer_size >> 9;
896     }
897 
898     sector_num = ide_get_sector(s);
899     if (n > 0) {
900         assert(n * 512 == s->sg.size);
901         dma_buf_commit(s, s->sg.size);
902         sector_num += n;
903         ide_set_sector(s, sector_num);
904         s->nsector -= n;
905     }
906 
907     /* end of transfer ? */
908     if (s->nsector == 0) {
909         s->status = READY_STAT | SEEK_STAT;
910         ide_bus_set_irq(s->bus);
911         goto eot;
912     }
913 
914     /* launch next transfer */
915     n = s->nsector;
916     s->io_buffer_index = 0;
917     s->io_buffer_size = n * 512;
918     prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
919     /* prepare_buf() must succeed and respect the limit */
920     assert(prep_size >= 0 && prep_size <= n * 512);
921 
922     /*
923      * Now prep_size stores the number of bytes in the sglist, and
924      * s->io_buffer_size stores the number of bytes described by the PRDs.
925      */
926 
927     if (prep_size < n * 512) {
928         /*
929          * The PRDs are too short for this request. Error condition!
930          * Reset the Active bit and don't raise the interrupt.
931          */
932         s->status = READY_STAT | SEEK_STAT;
933         dma_buf_commit(s, 0);
934         goto eot;
935     }
936 
937     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
938 
939     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
940         !ide_sect_range_ok(s, sector_num, n)) {
941         ide_dma_error(s);
942         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
943         return;
944     }
945 
946     offset = sector_num << BDRV_SECTOR_BITS;
947     switch (s->dma_cmd) {
948     case IDE_DMA_READ:
949         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
950                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
951         break;
952     case IDE_DMA_WRITE:
953         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
954                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
955         break;
956     case IDE_DMA_TRIM:
957         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
958                                         &s->sg, offset, BDRV_SECTOR_SIZE,
959                                         ide_issue_trim, s, ide_dma_cb, s,
960                                         DMA_DIRECTION_TO_DEVICE);
961         break;
962     default:
963         abort();
964     }
965     return;
966 
967 eot:
968     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
969         block_acct_done(blk_get_stats(s->blk), &s->acct);
970     }
971     ide_set_inactive(s, stay_active);
972 }
973 
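/*
 * Kick off a DMA READ/WRITE/TRIM command: set DRQ, start block accounting
 * for the full request where applicable, and hand control to the bus DMA
 * ops with ide_dma_cb as the completion callback.
 */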
974 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
975 {
976     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
977     s->io_buffer_size = 0;
978     s->dma_cmd = dma_cmd;
979 
980     switch (dma_cmd) {
981     case IDE_DMA_READ:
982         block_acct_start(blk_get_stats(s->blk), &s->acct,
983                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
984         break;
985     case IDE_DMA_WRITE:
986         block_acct_start(blk_get_stats(s->blk), &s->acct,
987                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
988         break;
989     default:
990         break;
991     }
992 
993     ide_start_dma(s, ide_dma_cb);
994 }
995 
996 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
997 {
998     s->io_buffer_index = 0;
999     ide_set_retry(s);
1000     if (s->bus->dma->ops->start_dma) {
1001         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
1002     }
1003 }
1004 
1005 static void ide_sector_write(IDEState *s);
1006 
1007 static void ide_sector_write_timer_cb(void *opaque)
1008 {
1009     IDEState *s = opaque;
1010     ide_bus_set_irq(s->bus);
1011 }
1012 
1013 static void ide_sector_write_cb(void *opaque, int ret)
1014 {
1015     IDEState *s = opaque;
1016     int n;
1017 
1018     s->pio_aiocb = NULL;
1019     s->status &= ~BUSY_STAT;
1020 
1021     if (ret != 0) {
1022         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
1023             return;
1024         }
1025     }
1026 
1027     block_acct_done(blk_get_stats(s->blk), &s->acct);
1028 
1029     n = s->nsector;
1030     if (n > s->req_nb_sectors) {
1031         n = s->req_nb_sectors;
1032     }
1033     s->nsector -= n;
1034 
1035     ide_set_sector(s, ide_get_sector(s) + n);
1036     if (s->nsector == 0) {
1037         /* no more sectors to write */
1038         ide_transfer_stop(s);
1039     } else {
1040         int n1 = s->nsector;
1041         if (n1 > s->req_nb_sectors) {
1042             n1 = s->req_nb_sectors;
1043         }
1044         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1045                            ide_sector_write);
1046     }
1047 
1048     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performance. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
1055         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1056                   (NANOSECONDS_PER_SECOND / 1000));
1057     } else {
1058         ide_bus_set_irq(s->bus);
1059     }
1060 }
1061 
1062 static void ide_sector_write(IDEState *s)
1063 {
1064     int64_t sector_num;
1065     int n;
1066 
1067     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1068     sector_num = ide_get_sector(s);
1069 
1070     n = s->nsector;
1071     if (n > s->req_nb_sectors) {
1072         n = s->req_nb_sectors;
1073     }
1074 
1075     trace_ide_sector_write(sector_num, n);
1076 
1077     if (!ide_sect_range_ok(s, sector_num, n)) {
1078         ide_rw_error(s);
1079         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1080         return;
1081     }
1082 
1083     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1084 
1085     block_acct_start(blk_get_stats(s->blk), &s->acct,
1086                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1087     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1088                                    &s->qiov, 0, ide_sector_write_cb, s);
1089 }
1090 
1091 static void ide_flush_cb(void *opaque, int ret)
1092 {
1093     IDEState *s = opaque;
1094 
1095     s->pio_aiocb = NULL;
1096 
1097     if (ret < 0) {
1098         /* XXX: What sector number to set here? */
1099         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1100             return;
1101         }
1102     }
1103 
1104     if (s->blk) {
1105         block_acct_done(blk_get_stats(s->blk), &s->acct);
1106     }
1107     s->status = READY_STAT | SEEK_STAT;
1108     ide_cmd_done(s);
1109     ide_bus_set_irq(s->bus);
1110 }
1111 
1112 static void ide_flush_cache(IDEState *s)
1113 {
1114     if (s->blk == NULL) {
1115         ide_flush_cb(s, 0);
1116         return;
1117     }
1118 
1119     s->status |= BUSY_STAT;
1120     ide_set_retry(s);
1121     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1122     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1123 }
1124 
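/*
 * CFATA metadata helpers: the CompactFlash metadata area is addressed in
 * 512-byte units, with the offset taken from the hcyl:lcyl registers and
 * the transfer length bounded by nsector (see the read/write helpers below).
 */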
1125 static void ide_cfata_metadata_inquiry(IDEState *s)
1126 {
1127     uint16_t *p;
1128     uint32_t spd;
1129 
1130     p = (uint16_t *) s->io_buffer;
1131     memset(p, 0, 0x200);
1132     spd = ((s->mdata_size - 1) >> 9) + 1;
1133 
1134     put_le16(p + 0, 0x0001);                    /* Data format revision */
1135     put_le16(p + 1, 0x0000);                    /* Media property: silicon */
1136     put_le16(p + 2, s->media_changed);          /* Media status */
1137     put_le16(p + 3, s->mdata_size & 0xffff);    /* Capacity in bytes (low) */
1138     put_le16(p + 4, s->mdata_size >> 16);       /* Capacity in bytes (high) */
1139     put_le16(p + 5, spd & 0xffff);              /* Sectors per device (low) */
1140     put_le16(p + 6, spd >> 16);                 /* Sectors per device (high) */
1141 }
1142 
1143 static void ide_cfata_metadata_read(IDEState *s)
1144 {
1145     uint16_t *p;
1146 
1147     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1148         s->status = ERR_STAT;
1149         s->error = ABRT_ERR;
1150         return;
1151     }
1152 
1153     p = (uint16_t *) s->io_buffer;
1154     memset(p, 0, 0x200);
1155 
1156     put_le16(p + 0, s->media_changed);          /* Media status */
1157     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1158                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1159                                     s->nsector << 9), 0x200 - 2));
1160 }
1161 
1162 static void ide_cfata_metadata_write(IDEState *s)
1163 {
1164     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1165         s->status = ERR_STAT;
1166         s->error = ABRT_ERR;
1167         return;
1168     }
1169 
1170     s->media_changed = 0;
1171 
1172     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1173                     s->io_buffer + 2,
1174                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1175                                     s->nsector << 9), 0x200 - 2));
1176 }
1177 
1178 /* called when the inserted state of the media has changed */
1179 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1180 {
1181     IDEState *s = opaque;
1182     uint64_t nb_sectors;
1183 
1184     s->tray_open = !load;
1185     blk_get_geometry(s->blk, &nb_sectors);
1186     s->nb_sectors = nb_sectors;
1187 
1188     /*
1189      * First indicate to the guest that a CD has been removed.  That's
1190      * done on the next command the guest sends us.
1191      *
1192      * Then we set UNIT_ATTENTION, by which the guest will
1193      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1194      */
1195     s->cdrom_changed = 1;
1196     s->events.new_media = true;
1197     s->events.eject_request = false;
1198     ide_bus_set_irq(s->bus);
1199 }
1200 
1201 static void ide_cd_eject_request_cb(void *opaque, bool force)
1202 {
1203     IDEState *s = opaque;
1204 
1205     s->events.eject_request = true;
1206     if (force) {
1207         s->tray_locked = false;
1208     }
1209     ide_bus_set_irq(s->bus);
1210 }
1211 
1212 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1213 {
1214     s->lba48 = lba48;
1215 
    /* Handle the 'magic' nsector count of 0 here.  To avoid fiddling
     * with the rest of the read logic, we just store the full sector
     * count in ->nsector and ignore ->hob_nsector from now on.
     */
    if (!s->lba48) {
        if (!s->nsector) {
            s->nsector = 256;
        }
    } else {
        if (!s->nsector && !s->hob_nsector) {
            s->nsector = 65536;
        } else {
            int lo = s->nsector;
            int hi = s->hob_nsector;

            s->nsector = (hi << 8) | lo;
        }
    }
1233 }
1234 
1235 static void ide_clear_hob(IDEBus *bus)
1236 {
    /* Any write to the command block registers clears the HOB bit of the
     * device control register. */
1238     bus->cmd &= ~(IDE_CTRL_HOB);
1239 }
1240 
1241 /* IOport [W]rite [R]egisters */
1242 enum ATA_IOPORT_WR {
1243     ATA_IOPORT_WR_DATA = 0,
1244     ATA_IOPORT_WR_FEATURES = 1,
1245     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1246     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1247     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1248     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1249     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1250     ATA_IOPORT_WR_COMMAND = 7,
1251     ATA_IOPORT_WR_NUM_REGISTERS,
1252 };
1253 
1254 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1255     [ATA_IOPORT_WR_DATA] = "Data",
1256     [ATA_IOPORT_WR_FEATURES] = "Features",
1257     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1258     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1259     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1260     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1261     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1262     [ATA_IOPORT_WR_COMMAND] = "Command"
1263 };
1264 
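/*
 * Handle writes to the ATA command block registers.  Both drives on the
 * bus receive each register write (they share the cable), and the previous
 * contents of the LBA48 task file registers are shifted into the hob_*
 * shadows so a single command can supply two bytes per register.
 */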
1265 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1266 {
1267     IDEBus *bus = opaque;
1268     IDEState *s = ide_bus_active_if(bus);
1269     int reg_num = addr & 7;
1270 
1271     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1272 
1273     /* ignore writes to command block while busy with previous command */
1274     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1275         return;
1276     }
1277 
1278     /* NOTE: Device0 and Device1 both receive incoming register writes.
1279      * (They're on the same bus! They have to!) */
1280 
1281     switch (reg_num) {
1282     case 0:
1283         break;
1284     case ATA_IOPORT_WR_FEATURES:
1285         ide_clear_hob(bus);
1286         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1287         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1288         bus->ifs[0].feature = val;
1289         bus->ifs[1].feature = val;
1290         break;
1291     case ATA_IOPORT_WR_SECTOR_COUNT:
1292         ide_clear_hob(bus);
1293         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1294         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1295         bus->ifs[0].nsector = val;
1296         bus->ifs[1].nsector = val;
1297         break;
1298     case ATA_IOPORT_WR_SECTOR_NUMBER:
1299         ide_clear_hob(bus);
1300         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1301         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1302         bus->ifs[0].sector = val;
1303         bus->ifs[1].sector = val;
1304         break;
1305     case ATA_IOPORT_WR_CYLINDER_LOW:
1306         ide_clear_hob(bus);
1307         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1308         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1309         bus->ifs[0].lcyl = val;
1310         bus->ifs[1].lcyl = val;
1311         break;
1312     case ATA_IOPORT_WR_CYLINDER_HIGH:
1313         ide_clear_hob(bus);
1314         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1315         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1316         bus->ifs[0].hcyl = val;
1317         bus->ifs[1].hcyl = val;
1318         break;
1319     case ATA_IOPORT_WR_DEVICE_HEAD:
1320         ide_clear_hob(bus);
1321         bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1322         bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1323         /* select drive */
1324         bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1325         break;
1326     default:
1327     case ATA_IOPORT_WR_COMMAND:
1328         ide_clear_hob(bus);
1329         qemu_irq_lower(bus->irq);
1330         ide_bus_exec_cmd(bus, val);
1331         break;
1332     }
1333 }
1334 
1335 static void ide_reset(IDEState *s)
1336 {
1337     trace_ide_reset(s);
1338 
1339     if (s->pio_aiocb) {
1340         blk_aio_cancel(s->pio_aiocb);
1341         s->pio_aiocb = NULL;
1342     }
1343 
1344     if (s->reset_reverts) {
1345         s->reset_reverts = false;
1346         s->heads         = s->drive_heads;
1347         s->sectors       = s->drive_sectors;
1348     }
    if (s->drive_kind == IDE_CFATA) {
        s->mult_sectors = 0;
    } else {
        s->mult_sectors = MAX_MULT_SECTORS;
    }
1353     /* ide regs */
1354     s->feature = 0;
1355     s->error = 0;
1356     s->nsector = 0;
1357     s->sector = 0;
1358     s->lcyl = 0;
1359     s->hcyl = 0;
1360 
1361     /* lba48 */
1362     s->hob_feature = 0;
1363     s->hob_sector = 0;
1364     s->hob_nsector = 0;
1365     s->hob_lcyl = 0;
1366     s->hob_hcyl = 0;
1367 
1368     s->select = (ATA_DEV_ALWAYS_ON);
1369     s->status = READY_STAT | SEEK_STAT;
1370 
1371     s->lba48 = 0;
1372 
1373     /* ATAPI specific */
1374     s->sense_key = 0;
1375     s->asc = 0;
1376     s->cdrom_changed = 0;
1377     s->packet_transfer_size = 0;
1378     s->elementary_transfer_size = 0;
1379     s->io_buffer_index = 0;
1380     s->cd_sector_size = 0;
1381     s->atapi_dma = 0;
1382     s->tray_locked = 0;
1383     s->tray_open = 0;
1384     /* ATA DMA state */
1385     s->io_buffer_size = 0;
1386     s->req_nb_sectors = 0;
1387 
1388     ide_set_signature(s);
1389     /* init the transfer handler so that 0xffff is returned on data
1390        accesses */
1391     s->end_transfer_func = ide_dummy_transfer_stop;
1392     ide_dummy_transfer_stop(s);
1393     s->media_changed = 0;
1394 }
1395 
1396 static bool cmd_nop(IDEState *s, uint8_t cmd)
1397 {
1398     return true;
1399 }
1400 
1401 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1402 {
1403     /* Halt PIO (in the DRQ phase), then DMA */
1404     ide_transfer_halt(s);
1405     ide_cancel_dma_sync(s);
1406 
1407     /* Reset any PIO commands, reset signature, etc */
1408     ide_reset(s);
1409 
1410     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1411      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1412     s->status = 0x00;
1413 
1414     /* Do not overwrite status register */
1415     return false;
1416 }
1417 
1418 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1419 {
1420     switch (s->feature) {
1421     case DSM_TRIM:
1422         if (s->blk) {
1423             ide_sector_start_dma(s, IDE_DMA_TRIM);
1424             return false;
1425         }
1426         break;
1427     }
1428 
1429     ide_abort_command(s);
1430     return true;
1431 }
1432 
1433 static bool cmd_identify(IDEState *s, uint8_t cmd)
1434 {
1435     if (s->blk && s->drive_kind != IDE_CD) {
1436         if (s->drive_kind != IDE_CFATA) {
1437             ide_identify(s);
1438         } else {
1439             ide_cfata_identify(s);
1440         }
1441         s->status = READY_STAT | SEEK_STAT;
1442         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1443         ide_bus_set_irq(s->bus);
1444         return false;
1445     } else {
1446         if (s->drive_kind == IDE_CD) {
1447             ide_set_signature(s);
1448         }
1449         ide_abort_command(s);
1450     }
1451 
1452     return true;
1453 }
1454 
1455 static bool cmd_verify(IDEState *s, uint8_t cmd)
1456 {
1457     bool lba48 = (cmd == WIN_VERIFY_EXT);
1458 
1459     /* do sector number check ? */
1460     ide_cmd_lba48_transform(s, lba48);
1461 
1462     return true;
1463 }
1464 
1465 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1466 {
1467     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1468         /* Disable Read and Write Multiple */
1469         s->mult_sectors = 0;
1470     } else if ((s->nsector & 0xff) != 0 &&
1471         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1472          (s->nsector & (s->nsector - 1)) != 0)) {
1473         ide_abort_command(s);
1474     } else {
1475         s->mult_sectors = s->nsector & 0xff;
1476     }
1477 
1478     return true;
1479 }
1480 
1481 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1482 {
1483     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1484 
1485     if (!s->blk || !s->mult_sectors) {
1486         ide_abort_command(s);
1487         return true;
1488     }
1489 
1490     ide_cmd_lba48_transform(s, lba48);
1491     s->req_nb_sectors = s->mult_sectors;
1492     ide_sector_read(s);
1493     return false;
1494 }
1495 
1496 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1497 {
1498     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1499     int n;
1500 
1501     if (!s->blk || !s->mult_sectors) {
1502         ide_abort_command(s);
1503         return true;
1504     }
1505 
1506     ide_cmd_lba48_transform(s, lba48);
1507 
1508     s->req_nb_sectors = s->mult_sectors;
1509     n = MIN(s->nsector, s->req_nb_sectors);
1510 
1511     s->status = SEEK_STAT | READY_STAT;
1512     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1513 
1514     s->media_changed = 1;
1515 
1516     return false;
1517 }
1518 
1519 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1520 {
1521     bool lba48 = (cmd == WIN_READ_EXT);
1522 
1523     if (s->drive_kind == IDE_CD) {
1524         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1525         ide_abort_command(s);
1526         return true;
1527     }
1528 
1529     if (!s->blk) {
1530         ide_abort_command(s);
1531         return true;
1532     }
1533 
1534     ide_cmd_lba48_transform(s, lba48);
1535     s->req_nb_sectors = 1;
1536     ide_sector_read(s);
1537 
1538     return false;
1539 }
1540 
1541 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1542 {
1543     bool lba48 = (cmd == WIN_WRITE_EXT);
1544 
1545     if (!s->blk) {
1546         ide_abort_command(s);
1547         return true;
1548     }
1549 
1550     ide_cmd_lba48_transform(s, lba48);
1551 
1552     s->req_nb_sectors = 1;
1553     s->status = SEEK_STAT | READY_STAT;
1554     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1555 
1556     s->media_changed = 1;
1557 
1558     return false;
1559 }
1560 
1561 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1562 {
1563     bool lba48 = (cmd == WIN_READDMA_EXT);
1564 
1565     if (!s->blk) {
1566         ide_abort_command(s);
1567         return true;
1568     }
1569 
1570     ide_cmd_lba48_transform(s, lba48);
1571     ide_sector_start_dma(s, IDE_DMA_READ);
1572 
1573     return false;
1574 }
1575 
1576 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1577 {
1578     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1579 
1580     if (!s->blk) {
1581         ide_abort_command(s);
1582         return true;
1583     }
1584 
1585     ide_cmd_lba48_transform(s, lba48);
1586     ide_sector_start_dma(s, IDE_DMA_WRITE);
1587 
1588     s->media_changed = 1;
1589 
1590     return false;
1591 }
1592 
1593 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1594 {
1595     ide_flush_cache(s);
1596     return false;
1597 }
1598 
1599 static bool cmd_seek(IDEState *s, uint8_t cmd)
1600 {
1601     /* XXX: Check that seek is within bounds */
1602     return true;
1603 }
1604 
1605 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1606 {
1607     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1608 
1609     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1610     if (s->nb_sectors == 0) {
1611         ide_abort_command(s);
1612         return true;
1613     }
1614 
1615     ide_cmd_lba48_transform(s, lba48);
1616     ide_set_sector(s, s->nb_sectors - 1);
1617 
1618     return true;
1619 }
1620 
1621 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1622 {
1623     s->nsector = 0xff; /* device active or idle */
1624     return true;
1625 }
1626 
1627 /* INITIALIZE DEVICE PARAMETERS */
1628 static bool cmd_specify(IDEState *s, uint8_t cmd)
1629 {
1630     if (s->blk && s->drive_kind != IDE_CD) {
1631         s->heads = (s->select & (ATA_DEV_HS)) + 1;
1632         s->sectors = s->nsector;
1633         ide_bus_set_irq(s->bus);
1634     } else {
1635         ide_abort_command(s);
1636     }
1637 
1638     return true;
1639 }
1640 
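/*
 * SET FEATURES: dispatch on the Features register subcommand.  Write cache
 * and transfer mode changes also update the cached IDENTIFY data (words 85
 * and 62/63/88 respectively) so subsequent IDENTIFY commands report the
 * currently selected settings.
 */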
1641 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1642 {
1643     uint16_t *identify_data;
1644 
1645     if (!s->blk) {
1646         ide_abort_command(s);
1647         return true;
1648     }
1649 
1650     /* XXX: valid for CDROM ? */
1651     switch (s->feature) {
1652     case 0x01: /* 8-bit I/O enable (CompactFlash) */
1653     case 0x81: /* 8-bit I/O disable (CompactFlash) */
1654         if (s->drive_kind != IDE_CFATA) {
1655             goto abort_cmd;
1656         }
1657         s->io8 = !(s->feature & 0x80);
1658         return true;
1659     case 0x02: /* write cache enable */
1660         blk_set_enable_write_cache(s->blk, true);
1661         identify_data = (uint16_t *)s->identify_data;
1662         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1663         return true;
1664     case 0x82: /* write cache disable */
1665         blk_set_enable_write_cache(s->blk, false);
1666         identify_data = (uint16_t *)s->identify_data;
1667         put_le16(identify_data + 85, (1 << 14) | 1);
1668         ide_flush_cache(s);
1669         return false;
1670     case 0xcc: /* reverting to power-on defaults enable */
1671         s->reset_reverts = true;
1672         return true;
1673     case 0x66: /* reverting to power-on defaults disable */
1674         s->reset_reverts = false;
1675         return true;
1676     case 0xaa: /* read look-ahead enable */
1677     case 0x55: /* read look-ahead disable */
1678     case 0x05: /* set advanced power management mode */
1679     case 0x85: /* disable advanced power management mode */
1680     case 0x69: /* NOP */
1681     case 0x67: /* NOP */
1682     case 0x96: /* NOP */
1683     case 0x9a: /* NOP */
1684     case 0x42: /* enable Automatic Acoustic Mode */
1685     case 0xc2: /* disable Automatic Acoustic Mode */
1686         return true;
1687     case 0x03: /* set transfer mode */
1688         {
1689             uint8_t val = s->nsector & 0x07;
1690             identify_data = (uint16_t *)s->identify_data;
1691 
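                 /* Identify words 62 (single word DMA), 63 (multiword DMA) and
                  * 88 (Ultra DMA) advertise the supported modes; bit (val + 8)
                  * marks the mode selected by this subcommand. */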
1692             switch (s->nsector >> 3) {
1693             case 0x00: /* pio default */
1694             case 0x01: /* pio mode */
1695                 put_le16(identify_data + 62, 0x07);
1696                 put_le16(identify_data + 63, 0x07);
1697                 put_le16(identify_data + 88, 0x3f);
1698                 break;
1699             case 0x02: /* single word dma mode */
1700                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1701                 put_le16(identify_data + 63, 0x07);
1702                 put_le16(identify_data + 88, 0x3f);
1703                 break;
1704             case 0x04: /* mdma mode */
1705                 put_le16(identify_data + 62, 0x07);
1706                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1707                 put_le16(identify_data + 88, 0x3f);
1708                 break;
1709             case 0x08: /* udma mode */
1710                 put_le16(identify_data + 62, 0x07);
1711                 put_le16(identify_data + 63, 0x07);
1712                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1713                 break;
1714             default:
1715                 goto abort_cmd;
1716             }
1717             return true;
1718         }
1719     }
1720 
1721 abort_cmd:
1722     ide_abort_command(s);
1723     return true;
1724 }
1725 
1726 
1727 /*** ATAPI commands ***/
1728 
1729 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1730 {
1731     ide_atapi_identify(s);
1732     s->status = READY_STAT | SEEK_STAT;
1733     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1734     ide_bus_set_irq(s->bus);
1735     return false;
1736 }
1737 
1738 /* EXECUTE DEVICE DIAGNOSTIC */
1739 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1740 {
1741     /*
1742      * Clear the device register per the ATA (v6) specification,
1743      * because ide_set_signature does not clear LBA or drive bits.
1744      */
1745     s->select = (ATA_DEV_ALWAYS_ON);
1746     ide_set_signature(s);
1747 
1748     if (s->drive_kind == IDE_CD) {
1749         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1750                         * devices to return a clear status register
1751                         * with READY_STAT *not* set. */
1752         s->error = 0x01;
1753     } else {
1754         s->status = READY_STAT | SEEK_STAT;
1755         /* The error register bits do not have their usual meaning for this
1756          * command! They are part of the regular output (which is why ERR_STAT
1757          * isn't set): Device 0 passed, Device 1 passed or not present. */
1758         s->error = 0x01;
1759         ide_bus_set_irq(s->bus);
1760     }
1761 
1762     return false;
1763 }
1764 
1765 static bool cmd_packet(IDEState *s, uint8_t cmd)
1766 {
1767     /* overlapping commands not supported */
1768     if (s->feature & 0x02) {
1769         ide_abort_command(s);
1770         return true;
1771     }
1772 
1773     s->status = READY_STAT | SEEK_STAT;
1774     s->atapi_dma = s->feature & 1;
1775     if (s->atapi_dma) {
1776         s->dma_cmd = IDE_DMA_ATAPI;
1777     }
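         /* Sector Count doubles as the ATAPI interrupt reason register: 1
          * (C/D set, I/O clear) means the device expects the command packet. */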
1778     s->nsector = 1;
1779     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1780                        ide_atapi_cmd);
1781     return false;
1782 }
1783 
1784 
1785 /*** CF-ATA commands ***/
1786 
1787 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1788 {
1789     s->error = 0x09;    /* miscellaneous error */
1790     s->status = READY_STAT | SEEK_STAT;
1791     ide_bus_set_irq(s->bus);
1792 
1793     return false;
1794 }
1795 
1796 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1797 {
1798     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1799      * required for Windows 8 to work with AHCI */
1800 
1801     if (cmd == CFA_WEAR_LEVEL) {
1802         s->nsector = 0;
1803     }
1804 
1805     if (cmd == CFA_ERASE_SECTORS) {
1806         s->media_changed = 1;
1807     }
1808 
1809     return true;
1810 }
1811 
1812 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1813 {
1814     s->status = READY_STAT | SEEK_STAT;
1815 
1816     memset(s->io_buffer, 0, 0x200);
1817     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1818     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1819     s->io_buffer[0x02] = s->select;                 /* Head */
1820     s->io_buffer[0x03] = s->sector;                 /* Sector */
1821     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1822     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1823     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1824     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1825     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1826     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1827     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1828 
1829     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1830     ide_bus_set_irq(s->bus);
1831 
1832     return false;
1833 }
1834 
1835 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1836 {
1837     switch (s->feature) {
1838     case 0x02:  /* Inquiry Metadata Storage */
1839         ide_cfata_metadata_inquiry(s);
1840         break;
1841     case 0x03:  /* Read Metadata Storage */
1842         ide_cfata_metadata_read(s);
1843         break;
1844     case 0x04:  /* Write Metadata Storage */
1845         ide_cfata_metadata_write(s);
1846         break;
1847     default:
1848         ide_abort_command(s);
1849         return true;
1850     }
1851 
1852     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1853     s->status = 0x00; /* NOTE: READY is _not_ set */
1854     ide_bus_set_irq(s->bus);
1855 
1856     return false;
1857 }
1858 
1859 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1860 {
1861     switch (s->feature) {
1862     case 0x01:  /* sense temperature in device */
1863         s->nsector = 0x50;      /* +20 C */
1864         break;
1865     default:
1866         ide_abort_command(s);
1867         return true;
1868     }
1869 
1870     return true;
1871 }
1872 
1873 
1874 /*** SMART commands ***/
1875 
1876 static bool cmd_smart(IDEState *s, uint8_t cmd)
1877 {
1878     int n;
1879 
1880     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1881         goto abort_cmd;
1882     }
1883 
1884     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1885         goto abort_cmd;
1886     }
1887 
1888     switch (s->feature) {
1889     case SMART_DISABLE:
1890         s->smart_enabled = 0;
1891         return true;
1892 
1893     case SMART_ENABLE:
1894         s->smart_enabled = 1;
1895         return true;
1896 
1897     case SMART_ATTR_AUTOSAVE:
1898         switch (s->sector) {
1899         case 0x00:
1900             s->smart_autosave = 0;
1901             break;
1902         case 0xf1:
1903             s->smart_autosave = 1;
1904             break;
1905         default:
1906             goto abort_cmd;
1907         }
1908         return true;
1909 
1910     case SMART_STATUS:
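             /* SMART RETURN STATUS signature: LBA Mid/High 4Fh/C2h means no
              * thresholds have been exceeded, F4h/2Ch reports pending errors. */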
1911         if (!s->smart_errors) {
1912             s->hcyl = 0xc2;
1913             s->lcyl = 0x4f;
1914         } else {
1915             s->hcyl = 0x2c;
1916             s->lcyl = 0xf4;
1917         }
1918         return true;
1919 
1920     case SMART_READ_THRESH:
1921         memset(s->io_buffer, 0, 0x200);
1922         s->io_buffer[0] = 0x01; /* smart struct version */
1923 
1924         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1925             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1926             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1927         }
1928 
1929         /* checksum */
1930         for (n = 0; n < 511; n++) {
1931             s->io_buffer[511] += s->io_buffer[n];
1932         }
1933         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1934 
1935         s->status = READY_STAT | SEEK_STAT;
1936         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1937         ide_bus_set_irq(s->bus);
1938         return false;
1939 
1940     case SMART_READ_DATA:
1941         memset(s->io_buffer, 0, 0x200);
1942         s->io_buffer[0] = 0x01; /* smart struct version */
1943 
1944         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1945             int i;
1946             for (i = 0; i < 11; i++) {
1947                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1948             }
1949         }
1950 
1951         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1952         if (s->smart_selftest_count == 0) {
1953             s->io_buffer[363] = 0;
1954         } else {
1955             s->io_buffer[363] =
1956                 s->smart_selftest_data[3 +
1957                            (s->smart_selftest_count - 1) *
1958                            24];
1959         }
1960         s->io_buffer[364] = 0x20;
1961         s->io_buffer[365] = 0x01;
1962         /* offline data collection capability: execute + self-test */
1963         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1964         s->io_buffer[368] = 0x03; /* smart capability (1) */
1965         s->io_buffer[369] = 0x00; /* smart capability (2) */
1966         s->io_buffer[370] = 0x01; /* error logging supported */
1967         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1968         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1969         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1970 
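             /* Byte 511 is the two's complement of the sum of bytes 0..510,
              * so the whole 512-byte structure sums to zero modulo 256. */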
1971         for (n = 0; n < 511; n++) {
1972             s->io_buffer[511] += s->io_buffer[n];
1973         }
1974         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1975 
1976         s->status = READY_STAT | SEEK_STAT;
1977         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1978         ide_bus_set_irq(s->bus);
1979         return false;
1980 
1981     case SMART_READ_LOG:
1982         switch (s->sector) {
1983         case 0x01: /* summary smart error log */
1984             memset(s->io_buffer, 0, 0x200);
1985             s->io_buffer[0] = 0x01;
1986             s->io_buffer[1] = 0x00; /* no error entries */
1987             s->io_buffer[452] = s->smart_errors & 0xff;
1988             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1989 
1990             for (n = 0; n < 511; n++) {
1991                 s->io_buffer[511] += s->io_buffer[n];
1992             }
1993             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1994             break;
1995         case 0x06: /* smart self test log */
1996             memset(s->io_buffer, 0, 0x200);
1997             s->io_buffer[0] = 0x01;
1998             if (s->smart_selftest_count == 0) {
1999                 s->io_buffer[508] = 0;
2000             } else {
2001                 s->io_buffer[508] = s->smart_selftest_count;
2002                 for (n = 2; n < 506; n++)  {
2003                     s->io_buffer[n] = s->smart_selftest_data[n];
2004                 }
2005             }
2006 
2007             for (n = 0; n < 511; n++) {
2008                 s->io_buffer[511] += s->io_buffer[n];
2009             }
2010             s->io_buffer[511] = 0x100 - s->io_buffer[511];
2011             break;
2012         default:
2013             goto abort_cmd;
2014         }
2015         s->status = READY_STAT | SEEK_STAT;
2016         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2017         ide_bus_set_irq(s->bus);
2018         return false;
2019 
2020     case SMART_EXECUTE_OFFLINE:
2021         switch (s->sector) {
2022         case 0: /* off-line routine */
2023         case 1: /* short self test */
2024         case 2: /* extended self test */
2025             s->smart_selftest_count++;
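                 /* The self-test log area holds 21 entries of 24 bytes each;
                  * wrap around to slot 1 once it is full. */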
2026             if (s->smart_selftest_count > 21) {
2027                 s->smart_selftest_count = 1;
2028             }
2029             n = 2 + (s->smart_selftest_count - 1) * 24;
2030             s->smart_selftest_data[n] = s->sector;
2031             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
2032             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
2033             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
2034             break;
2035         default:
2036             goto abort_cmd;
2037         }
2038         return true;
2039     }
2040 
2041 abort_cmd:
2042     ide_abort_command(s);
2043     return true;
2044 }
2045 
2046 #define HD_OK (1u << IDE_HD)
2047 #define CD_OK (1u << IDE_CD)
2048 #define CFA_OK (1u << IDE_CFATA)
2049 #define HD_CFA_OK (HD_OK | CFA_OK)
2050 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2051 
2052 /* Set the Disk Seek Completed status bit during completion */
2053 #define SET_DSC (1u << 8)
2054 
2055 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2056 static const struct {
2057     /* Returns true if the completion code should be run */
2058     bool (*handler)(IDEState *s, uint8_t cmd);
2059     int flags;
2060 } ide_cmd_table[0x100] = {
2061     /* NOP not implemented, mandatory for CD */
2062     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
2063     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2064     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2065     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
2066     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2067     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2068     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2069     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2070     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2071     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2072     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2073     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2074     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2075     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2076     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2077     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2078     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2079     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2080     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2081     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2082     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2083     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2084     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2085     [WIN_SPECIFY]                 = { cmd_specify, HD_CFA_OK | SET_DSC },
2086     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2087     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2088     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2089     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2090     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2091     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2092     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2093     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2094     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2095     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2096     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2097     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2098     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2099     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2100     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2101     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2102     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2103     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2104     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2105     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2106     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2107     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2108     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2109     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2110     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2111     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2112     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2113     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2114     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2115     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2116     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2117     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2118 };
2119 
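     /* A command is permitted only if it has a table entry whose flags
      * include the bit for this drive's kind (HD, CD or CFATA). */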
2120 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2121 {
2122     return cmd < ARRAY_SIZE(ide_cmd_table)
2123         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2124 }
2125 
2126 void ide_bus_exec_cmd(IDEBus *bus, uint32_t val)
2127 {
2128     IDEState *s;
2129     bool complete;
2130 
2131     s = ide_bus_active_if(bus);
2132     trace_ide_bus_exec_cmd(bus, s, val);
2133 
2134     /* ignore commands to a non-existent slave */
2135     if (s != bus->ifs && !s->blk) {
2136         return;
2137     }
2138 
2139     /* Only RESET is allowed while BSY and/or DRQ are set,
2140      * and only to ATAPI devices. */
2141     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2142         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2143             return;
2144         }
2145     }
2146 
2147     if (!ide_cmd_permitted(s, val)) {
2148         ide_abort_command(s);
2149         ide_bus_set_irq(s->bus);
2150         return;
2151     }
2152 
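         /* Command accepted: mark the device busy.  The handler returns true
          * if the command completed synchronously and is finalized below. */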
2153     s->status = READY_STAT | BUSY_STAT;
2154     s->error = 0;
2155     s->io_buffer_offset = 0;
2156 
2157     complete = ide_cmd_table[val].handler(s, val);
2158     if (complete) {
2159         s->status &= ~BUSY_STAT;
2160         assert(!!s->error == !!(s->status & ERR_STAT));
2161 
2162         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2163             s->status |= SEEK_STAT;
2164         }
2165 
2166         ide_cmd_done(s);
2167         ide_bus_set_irq(s->bus);
2168     }
2169 }
2170 
2171 /* IOport [R]ead [R]egisters */
2172 enum ATA_IOPORT_RR {
2173     ATA_IOPORT_RR_DATA = 0,
2174     ATA_IOPORT_RR_ERROR = 1,
2175     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2176     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2177     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2178     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2179     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2180     ATA_IOPORT_RR_STATUS = 7,
2181     ATA_IOPORT_RR_NUM_REGISTERS,
2182 };
2183 
2184 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2185     [ATA_IOPORT_RR_DATA] = "Data",
2186     [ATA_IOPORT_RR_ERROR] = "Error",
2187     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2188     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2189     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2190     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2191     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2192     [ATA_IOPORT_RR_STATUS] = "Status"
2193 };
2194 
2195 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2196 {
2197     IDEBus *bus = opaque;
2198     IDEState *s = ide_bus_active_if(bus);
2199     uint32_t reg_num;
2200     int ret, hob;
2201 
2202     reg_num = addr & 7;
2203     hob = bus->cmd & (IDE_CTRL_HOB);
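         /* With the HOB bit set in the device control register, reads of the
          * LBA48 registers return the previously written high-order bytes. */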
2204     switch (reg_num) {
2205     case ATA_IOPORT_RR_DATA:
2206         /*
2207          * The pre-GRUB Solaris x86 bootloader relies upon inb
2208          * consuming a word from the drive's sector buffer.
2209          */
2210         ret = ide_data_readw(bus, addr) & 0xff;
2211         break;
2212     case ATA_IOPORT_RR_ERROR:
2213         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2214             (s != bus->ifs && !s->blk)) {
2215             ret = 0;
2216         } else if (!hob) {
2217             ret = s->error;
2218         } else {
2219             ret = s->hob_feature;
2220         }
2221         break;
2222     case ATA_IOPORT_RR_SECTOR_COUNT:
2223         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2224             ret = 0;
2225         } else if (!hob) {
2226             ret = s->nsector & 0xff;
2227         } else {
2228             ret = s->hob_nsector;
2229         }
2230         break;
2231     case ATA_IOPORT_RR_SECTOR_NUMBER:
2232         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2233             ret = 0;
2234         } else if (!hob) {
2235             ret = s->sector;
2236         } else {
2237             ret = s->hob_sector;
2238         }
2239         break;
2240     case ATA_IOPORT_RR_CYLINDER_LOW:
2241         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2242             ret = 0;
2243         } else if (!hob) {
2244             ret = s->lcyl;
2245         } else {
2246             ret = s->hob_lcyl;
2247         }
2248         break;
2249     case ATA_IOPORT_RR_CYLINDER_HIGH:
2250         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2251             ret = 0;
2252         } else if (!hob) {
2253             ret = s->hcyl;
2254         } else {
2255             ret = s->hob_hcyl;
2256         }
2257         break;
2258     case ATA_IOPORT_RR_DEVICE_HEAD:
2259         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2260             ret = 0;
2261         } else {
2262             ret = s->select;
2263         }
2264         break;
2265     default:
2266     case ATA_IOPORT_RR_STATUS:
2267         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2268             (s != bus->ifs && !s->blk)) {
2269             ret = 0;
2270         } else {
2271             ret = s->status;
2272         }
2273         qemu_irq_lower(bus->irq);
2274         break;
2275     }
2276 
2277     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2278     return ret;
2279 }
2280 
2281 uint32_t ide_status_read(void *opaque, uint32_t addr)
2282 {
2283     IDEBus *bus = opaque;
2284     IDEState *s = ide_bus_active_if(bus);
2285     int ret;
2286 
2287     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2288         (s != bus->ifs && !s->blk)) {
2289         ret = 0;
2290     } else {
2291         ret = s->status;
2292     }
2293 
2294     trace_ide_status_read(addr, ret, bus, s);
2295     return ret;
2296 }
2297 
2298 static void ide_perform_srst(IDEState *s)
2299 {
2300     s->status |= BUSY_STAT;
2301 
2302     /* Halt PIO (via register state); the PIO bottom half remains scheduled. */
2303     ide_transfer_halt(s);
2304 
2305     /* Cancel DMA -- may drain block device and invoke callbacks */
2306     ide_cancel_dma_sync(s);
2307 
2308     /* Cancel PIO callback, reset registers/signature, etc */
2309     ide_reset(s);
2310 
2311     /* perform diagnostic */
2312     cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
2313 }
2314 
2315 static void ide_bus_perform_srst(void *opaque)
2316 {
2317     IDEBus *bus = opaque;
2318     IDEState *s;
2319     int i;
2320 
2321     for (i = 0; i < 2; i++) {
2322         s = &bus->ifs[i];
2323         ide_perform_srst(s);
2324     }
2325 
2326     bus->cmd &= ~IDE_CTRL_RESET;
2327 }
2328 
2329 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2330 {
2331     IDEBus *bus = opaque;
2332     IDEState *s;
2333     int i;
2334 
2335     trace_ide_ctrl_write(addr, val, bus);
2336 
2337     /* Device0 and Device1 each have their own control register,
2338      * but QEMU models it as just one register in the controller. */
2339     if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2340         for (i = 0; i < 2; i++) {
2341             s = &bus->ifs[i];
2342             s->status |= BUSY_STAT;
2343         }
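             /* Both devices are marked busy immediately; the actual software
              * reset is deferred to a bottom half. */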
2344         replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2345                                          ide_bus_perform_srst, bus);
2346     }
2347 
2348     bus->cmd = val;
2349 }
2350 
2351 /*
2352  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2353  * transferred from the device to the guest), false if it's a PIO in.
2354  */
2355 static bool ide_is_pio_out(IDEState *s)
2356 {
2357     if (s->end_transfer_func == ide_sector_write ||
2358         s->end_transfer_func == ide_atapi_cmd) {
2359         return false;
2360     } else if (s->end_transfer_func == ide_sector_read ||
2361                s->end_transfer_func == ide_transfer_stop ||
2362                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2363                s->end_transfer_func == ide_dummy_transfer_stop) {
2364         return true;
2365     }
2366 
2367     abort();
2368 }
2369 
2370 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2371 {
2372     IDEBus *bus = opaque;
2373     IDEState *s = ide_bus_active_if(bus);
2374     uint8_t *p;
2375 
2376     trace_ide_data_writew(addr, val, bus, s);
2377 
2378     /* PIO data access allowed only when DRQ bit is set. The result of a write
2379      * during PIO out is indeterminate, just ignore it. */
2380     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2381         return;
2382     }
2383 
2384     p = s->data_ptr;
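         /* io8 selects 8-bit PIO transfers (CompactFlash 8-bit mode, enabled
          * via SET FEATURES 0x01/0x81). */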
2385     if (s->io8) {
2386         if (p + 1 > s->data_end) {
2387             return;
2388         }
2389 
2390         *p++ = val;
2391     } else {
2392         if (p + 2 > s->data_end) {
2393             return;
2394         }
2395 
2396         *(uint16_t *)p = le16_to_cpu(val);
2397         p += 2;
2398     }
2399     s->data_ptr = p;
2400     if (p >= s->data_end) {
2401         s->status &= ~DRQ_STAT;
2402         s->end_transfer_func(s);
2403     }
2404 }
2405 
2406 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2407 {
2408     IDEBus *bus = opaque;
2409     IDEState *s = ide_bus_active_if(bus);
2410     uint8_t *p;
2411     int ret;
2412 
2413     /* PIO data access allowed only when DRQ bit is set. The result of a read
2414      * during PIO in is indeterminate, return 0 and don't move forward. */
2415     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2416         return 0;
2417     }
2418 
2419     p = s->data_ptr;
2420     if (s->io8) {
2421         if (p + 1 > s->data_end) {
2422             return 0;
2423         }
2424 
2425         ret = *p++;
2426     } else {
2427         if (p + 2 > s->data_end) {
2428             return 0;
2429         }
2430 
2431         ret = cpu_to_le16(*(uint16_t *)p);
2432         p += 2;
2433     }
2434     s->data_ptr = p;
2435     if (p >= s->data_end) {
2436         s->status &= ~DRQ_STAT;
2437         s->end_transfer_func(s);
2438     }
2439 
2440     trace_ide_data_readw(addr, ret, bus, s);
2441     return ret;
2442 }
2443 
2444 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2445 {
2446     IDEBus *bus = opaque;
2447     IDEState *s = ide_bus_active_if(bus);
2448     uint8_t *p;
2449 
2450     trace_ide_data_writel(addr, val, bus, s);
2451 
2452     /* PIO data access allowed only when DRQ bit is set. The result of a write
2453      * during PIO out is indeterminate, just ignore it. */
2454     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2455         return;
2456     }
2457 
2458     p = s->data_ptr;
2459     if (p + 4 > s->data_end) {
2460         return;
2461     }
2462 
2463     *(uint32_t *)p = le32_to_cpu(val);
2464     p += 4;
2465     s->data_ptr = p;
2466     if (p >= s->data_end) {
2467         s->status &= ~DRQ_STAT;
2468         s->end_transfer_func(s);
2469     }
2470 }
2471 
2472 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2473 {
2474     IDEBus *bus = opaque;
2475     IDEState *s = ide_bus_active_if(bus);
2476     uint8_t *p;
2477     int ret;
2478 
2479     /* PIO data access allowed only when DRQ bit is set. The result of a read
2480      * during PIO in is indeterminate, return 0 and don't move forward. */
2481     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2482         ret = 0;
2483         goto out;
2484     }
2485 
2486     p = s->data_ptr;
2487     if (p + 4 > s->data_end) {
2488         return 0;
2489     }
2490 
2491     ret = cpu_to_le32(*(uint32_t *)p);
2492     p += 4;
2493     s->data_ptr = p;
2494     if (p >= s->data_end) {
2495         s->status &= ~DRQ_STAT;
2496         s->end_transfer_func(s);
2497     }
2498 
2499 out:
2500     trace_ide_data_readl(addr, ret, bus, s);
2501     return ret;
2502 }
2503 
2504 static void ide_dummy_transfer_stop(IDEState *s)
2505 {
2506     s->data_ptr = s->io_buffer;
2507     s->data_end = s->io_buffer;
2508     s->io_buffer[0] = 0xff;
2509     s->io_buffer[1] = 0xff;
2510     s->io_buffer[2] = 0xff;
2511     s->io_buffer[3] = 0xff;
2512 }
2513 
2514 void ide_bus_reset(IDEBus *bus)
2515 {
2516     bus->unit = 0;
2517     bus->cmd = 0;
2518     ide_reset(&bus->ifs[0]);
2519     ide_reset(&bus->ifs[1]);
2520     ide_clear_hob(bus);
2521 
2522     /* pending async DMA */
2523     if (bus->dma->aiocb) {
2524         trace_ide_bus_reset_aio();
2525         blk_aio_cancel(bus->dma->aiocb);
2526         bus->dma->aiocb = NULL;
2527     }
2528 
2529     /* reset dma provider too */
2530     if (bus->dma->ops->reset) {
2531         bus->dma->ops->reset(bus->dma);
2532     }
2533 }
2534 
2535 static bool ide_cd_is_tray_open(void *opaque)
2536 {
2537     return ((IDEState *)opaque)->tray_open;
2538 }
2539 
2540 static bool ide_cd_is_medium_locked(void *opaque)
2541 {
2542     return ((IDEState *)opaque)->tray_locked;
2543 }
2544 
2545 static void ide_resize_cb(void *opaque)
2546 {
2547     IDEState *s = opaque;
2548     uint64_t nb_sectors;
2549 
2550     if (!s->identify_set) {
2551         return;
2552     }
2553 
2554     blk_get_geometry(s->blk, &nb_sectors);
2555     s->nb_sectors = nb_sectors;
2556 
2557     /* Update the identify data buffer. */
2558     if (s->drive_kind == IDE_CFATA) {
2559         ide_cfata_identify_size(s);
2560     } else {
2561         /* IDE_CD uses a different set of callbacks entirely. */
2562         assert(s->drive_kind != IDE_CD);
2563         ide_identify_size(s);
2564     }
2565 }
2566 
2567 static const BlockDevOps ide_cd_block_ops = {
2568     .change_media_cb = ide_cd_change_cb,
2569     .eject_request_cb = ide_cd_eject_request_cb,
2570     .is_tray_open = ide_cd_is_tray_open,
2571     .is_medium_locked = ide_cd_is_medium_locked,
2572 };
2573 
2574 static const BlockDevOps ide_hd_block_ops = {
2575     .resize_cb = ide_resize_cb,
2576 };
2577 
2578 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2579                    const char *version, const char *serial, const char *model,
2580                    uint64_t wwn,
2581                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2582                    int chs_trans, Error **errp)
2583 {
2584     uint64_t nb_sectors;
2585 
2586     s->blk = blk;
2587     s->drive_kind = kind;
2588 
2589     blk_get_geometry(blk, &nb_sectors);
2590     s->cylinders = cylinders;
2591     s->heads = s->drive_heads = heads;
2592     s->sectors = s->drive_sectors = secs;
2593     s->chs_trans = chs_trans;
2594     s->nb_sectors = nb_sectors;
2595     s->wwn = wwn;
2596     /* The SMART values should be preserved across power cycles
2597        but they aren't.  */
2598     s->smart_enabled = 1;
2599     s->smart_autosave = 1;
2600     s->smart_errors = 0;
2601     s->smart_selftest_count = 0;
2602     if (kind == IDE_CD) {
2603         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2604     } else {
2605         if (!blk_is_inserted(s->blk)) {
2606             error_setg(errp, "Device needs media, but drive is empty");
2607             return -1;
2608         }
2609         if (!blk_is_writable(blk)) {
2610             error_setg(errp, "Can't use a read-only drive");
2611             return -1;
2612         }
2613         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2614     }
2615     if (serial) {
2616         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2617     } else {
2618         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2619                  "QM%05d", s->drive_serial);
2620     }
2621     if (model) {
2622         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2623     } else {
2624         switch (kind) {
2625         case IDE_CD:
2626             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2627             break;
2628         case IDE_CFATA:
2629             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2630             break;
2631         default:
2632             strcpy(s->drive_model_str, "QEMU HARDDISK");
2633             break;
2634         }
2635     }
2636 
2637     if (version) {
2638         pstrcpy(s->version, sizeof(s->version), version);
2639     } else {
2640         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2641     }
2642 
2643     ide_reset(s);
2644     blk_iostatus_enable(blk);
2645     return 0;
2646 }
2647 
2648 static void ide_init1(IDEBus *bus, int unit)
2649 {
2650     static int drive_serial = 1;
2651     IDEState *s = &bus->ifs[unit];
2652 
2653     s->bus = bus;
2654     s->unit = unit;
2655     s->drive_serial = drive_serial++;
2656     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2657     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2658     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2659     memset(s->io_buffer, 0, s->io_buffer_total_len);
2660 
2661     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2662     memset(s->smart_selftest_data, 0, 512);
2663 
2664     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2665                                            ide_sector_write_timer_cb, s);
2666 }
2667 
2668 static int ide_nop_int(const IDEDMA *dma, bool is_write)
2669 {
2670     return 0;
2671 }
2672 
2673 static void ide_nop(const IDEDMA *dma)
2674 {
2675 }
2676 
2677 static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2678 {
2679     return 0;
2680 }
2681 
2682 static const IDEDMAOps ide_dma_nop_ops = {
2683     .prepare_buf    = ide_nop_int32,
2684     .restart_dma    = ide_nop,
2685     .rw_buf         = ide_nop_int,
2686 };
2687 
2688 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2689 {
2690     s->unit = s->bus->retry_unit;
2691     ide_set_sector(s, s->bus->retry_sector_num);
2692     s->nsector = s->bus->retry_nsector;
2693     s->bus->dma->ops->restart_dma(s->bus->dma);
2694     s->io_buffer_size = 0;
2695     s->dma_cmd = dma_cmd;
2696     ide_start_dma(s, ide_dma_cb);
2697 }
2698 
2699 static void ide_restart_bh(void *opaque)
2700 {
2701     IDEBus *bus = opaque;
2702     IDEState *s;
2703     bool is_read;
2704     int error_status;
2705 
2706     qemu_bh_delete(bus->bh);
2707     bus->bh = NULL;
2708 
2709     error_status = bus->error_status;
2710     if (bus->error_status == 0) {
2711         return;
2712     }
2713 
2714     s = ide_bus_active_if(bus);
2715     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2716 
2717     /* The error status must be cleared before resubmitting the request: The
2718      * request may fail again, and this case can only be distinguished if the
2719      * called function can set a new error status. */
2720     bus->error_status = 0;
2721 
2722     /* The HBA has generically asked to be kicked on retry */
2723     if (error_status & IDE_RETRY_HBA) {
2724         if (s->bus->dma->ops->restart) {
2725             s->bus->dma->ops->restart(s->bus->dma);
2726         }
2727     } else if (IS_IDE_RETRY_DMA(error_status)) {
2728         if (error_status & IDE_RETRY_TRIM) {
2729             ide_restart_dma(s, IDE_DMA_TRIM);
2730         } else {
2731             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2732         }
2733     } else if (IS_IDE_RETRY_PIO(error_status)) {
2734         if (is_read) {
2735             ide_sector_read(s);
2736         } else {
2737             ide_sector_write(s);
2738         }
2739     } else if (error_status & IDE_RETRY_FLUSH) {
2740         ide_flush_cache(s);
2741     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2742         assert(s->end_transfer_func == ide_atapi_cmd);
2743         ide_atapi_dma_restart(s);
2744     } else {
2745         abort();
2746     }
2747 }
2748 
2749 static void ide_restart_cb(void *opaque, bool running, RunState state)
2750 {
2751     IDEBus *bus = opaque;
2752 
2753     if (!running) {
2754         return;
         }
2755 
2756     if (!bus->bh) {
2757         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2758         qemu_bh_schedule(bus->bh);
2759     }
2760 }
2761 
2762 void ide_bus_register_restart_cb(IDEBus *bus)
2763 {
2764     if (bus->dma->ops->restart_dma) {
2765         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2766     }
2767 }
2768 
2769 static IDEDMA ide_dma_nop = {
2770     .ops = &ide_dma_nop_ops,
2771     .aiocb = NULL,
2772 };
2773 
2774 void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out)
2775 {
2776     int i;
2777 
2778     for (i = 0; i < 2; i++) {
2779         ide_init1(bus, i);
2780         ide_reset(&bus->ifs[i]);
2781     }
2782     bus->irq = irq_out;
2783     bus->dma = &ide_dma_nop;
2784 }
2785 
2786 void ide_bus_set_irq(IDEBus *bus)
2787 {
2788     if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) {
2789         qemu_irq_raise(bus->irq);
2790     }
2791 }
2792 
2793 void ide_exit(IDEState *s)
2794 {
2795     timer_free(s->sector_write_timer);
2796     qemu_vfree(s->smart_selftest_data);
2797     qemu_vfree(s->io_buffer);
2798 }
2799 
2800 static bool is_identify_set(void *opaque, int version_id)
2801 {
2802     IDEState *s = opaque;
2803 
2804     return s->identify_set != 0;
2805 }
2806 
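     /* Function pointers cannot be migrated, so the PIO VMState stores an
      * index into this table instead (see ide_drive_pio_pre_save/post_load). */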
2807 static EndTransferFunc* transfer_end_table[] = {
2808         ide_sector_read,
2809         ide_sector_write,
2810         ide_transfer_stop,
2811         ide_atapi_cmd_reply_end,
2812         ide_atapi_cmd,
2813         ide_dummy_transfer_stop,
2814 };
2815 
2816 static int transfer_end_table_idx(EndTransferFunc *fn)
2817 {
2818     int i;
2819 
2820     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++) {
2821         if (transfer_end_table[i] == fn) {
2822             return i;
             }
         }
2823 
2824     return -1;
2825 }
2826 
2827 static int ide_drive_post_load(void *opaque, int version_id)
2828 {
2829     IDEState *s = opaque;
2830 
2831     if (s->blk && s->identify_set) {
2832         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2833     }
2834     return 0;
2835 }
2836 
2837 static int ide_drive_pio_post_load(void *opaque, int version_id)
2838 {
2839     IDEState *s = opaque;
2840 
2841     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2842         return -EINVAL;
2843     }
2844     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2845     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2846     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2847     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2848 
2849     return 0;
2850 }
2851 
2852 static int ide_drive_pio_pre_save(void *opaque)
2853 {
2854     IDEState *s = opaque;
2855     int idx;
2856 
2857     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2858     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2859 
2860     idx = transfer_end_table_idx(s->end_transfer_func);
2861     if (idx == -1) {
2862         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2863                         __func__);
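             /* Fall back to index 2, i.e. ide_transfer_stop. */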
2864         s->end_transfer_fn_idx = 2;
2865     } else {
2866         s->end_transfer_fn_idx = idx;
2867     }
2868 
2869     return 0;
2870 }
2871 
2872 static bool ide_drive_pio_state_needed(void *opaque)
2873 {
2874     IDEState *s = opaque;
2875 
2876     return ((s->status & DRQ_STAT) != 0)
2877         || (s->bus->error_status & IDE_RETRY_PIO);
2878 }
2879 
2880 static bool ide_tray_state_needed(void *opaque)
2881 {
2882     IDEState *s = opaque;
2883 
2884     return s->tray_open || s->tray_locked;
2885 }
2886 
2887 static bool ide_atapi_gesn_needed(void *opaque)
2888 {
2889     IDEState *s = opaque;
2890 
2891     return s->events.new_media || s->events.eject_request;
2892 }
2893 
2894 static bool ide_error_needed(void *opaque)
2895 {
2896     IDEBus *bus = opaque;
2897 
2898     return (bus->error_status != 0);
2899 }
2900 
2901 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2902 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2903     .name ="ide_drive/atapi/gesn_state",
2904     .version_id = 1,
2905     .minimum_version_id = 1,
2906     .needed = ide_atapi_gesn_needed,
2907     .fields = (VMStateField[]) {
2908         VMSTATE_BOOL(events.new_media, IDEState),
2909         VMSTATE_BOOL(events.eject_request, IDEState),
2910         VMSTATE_END_OF_LIST()
2911     }
2912 };
2913 
2914 static const VMStateDescription vmstate_ide_tray_state = {
2915     .name = "ide_drive/tray_state",
2916     .version_id = 1,
2917     .minimum_version_id = 1,
2918     .needed = ide_tray_state_needed,
2919     .fields = (VMStateField[]) {
2920         VMSTATE_BOOL(tray_open, IDEState),
2921         VMSTATE_BOOL(tray_locked, IDEState),
2922         VMSTATE_END_OF_LIST()
2923     }
2924 };
2925 
2926 static const VMStateDescription vmstate_ide_drive_pio_state = {
2927     .name = "ide_drive/pio_state",
2928     .version_id = 1,
2929     .minimum_version_id = 1,
2930     .pre_save = ide_drive_pio_pre_save,
2931     .post_load = ide_drive_pio_post_load,
2932     .needed = ide_drive_pio_state_needed,
2933     .fields = (VMStateField[]) {
2934         VMSTATE_INT32(req_nb_sectors, IDEState),
2935         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2936                              vmstate_info_uint8, uint8_t),
2937         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2938         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2939         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2940         VMSTATE_INT32(elementary_transfer_size, IDEState),
2941         VMSTATE_INT32(packet_transfer_size, IDEState),
2942         VMSTATE_END_OF_LIST()
2943     }
2944 };
2945 
2946 const VMStateDescription vmstate_ide_drive = {
2947     .name = "ide_drive",
2948     .version_id = 3,
2949     .minimum_version_id = 0,
2950     .post_load = ide_drive_post_load,
2951     .fields = (VMStateField[]) {
2952         VMSTATE_INT32(mult_sectors, IDEState),
2953         VMSTATE_INT32(identify_set, IDEState),
2954         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2955         VMSTATE_UINT8(feature, IDEState),
2956         VMSTATE_UINT8(error, IDEState),
2957         VMSTATE_UINT32(nsector, IDEState),
2958         VMSTATE_UINT8(sector, IDEState),
2959         VMSTATE_UINT8(lcyl, IDEState),
2960         VMSTATE_UINT8(hcyl, IDEState),
2961         VMSTATE_UINT8(hob_feature, IDEState),
2962         VMSTATE_UINT8(hob_sector, IDEState),
2963         VMSTATE_UINT8(hob_nsector, IDEState),
2964         VMSTATE_UINT8(hob_lcyl, IDEState),
2965         VMSTATE_UINT8(hob_hcyl, IDEState),
2966         VMSTATE_UINT8(select, IDEState),
2967         VMSTATE_UINT8(status, IDEState),
2968         VMSTATE_UINT8(lba48, IDEState),
2969         VMSTATE_UINT8(sense_key, IDEState),
2970         VMSTATE_UINT8(asc, IDEState),
2971         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2972         VMSTATE_END_OF_LIST()
2973     },
2974     .subsections = (const VMStateDescription*[]) {
2975         &vmstate_ide_drive_pio_state,
2976         &vmstate_ide_tray_state,
2977         &vmstate_ide_atapi_gesn_state,
2978         NULL
2979     }
2980 };
2981 
2982 static const VMStateDescription vmstate_ide_error_status = {
2983     .name ="ide_bus/error",
2984     .version_id = 2,
2985     .minimum_version_id = 1,
2986     .needed = ide_error_needed,
2987     .fields = (VMStateField[]) {
2988         VMSTATE_INT32(error_status, IDEBus),
2989         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2990         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2991         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2992         VMSTATE_END_OF_LIST()
2993     }
2994 };
2995 
2996 const VMStateDescription vmstate_ide_bus = {
2997     .name = "ide_bus",
2998     .version_id = 1,
2999     .minimum_version_id = 1,
3000     .fields = (VMStateField[]) {
3001         VMSTATE_UINT8(cmd, IDEBus),
3002         VMSTATE_UINT8(unit, IDEBus),
3003         VMSTATE_END_OF_LIST()
3004     },
3005     .subsections = (const VMStateDescription*[]) {
3006         &vmstate_ide_error_status,
3007         NULL
3008     }
3009 };
3010 
3011 void ide_drive_get(DriveInfo **hd, int n)
3012 {
3013     int i;
3014 
3015     for (i = 0; i < n; i++) {
3016         hd[i] = drive_get_by_index(IF_IDE, i);
3017     }
3018 }
3019