xref: /openbmc/qemu/hw/ide/core.c (revision 4921d0a7)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/irq.h"
28 #include "hw/isa/isa.h"
29 #include "migration/vmstate.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/timer.h"
33 #include "qemu/hw-version.h"
34 #include "qemu/memalign.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/blockdev.h"
37 #include "sysemu/dma.h"
38 #include "hw/block/block.h"
39 #include "sysemu/block-backend.h"
40 #include "qapi/error.h"
41 #include "qemu/cutils.h"
42 #include "sysemu/replay.h"
43 #include "sysemu/runstate.h"
44 #include "ide-internal.h"
45 #include "trace.h"
46 
47 /* These values were based on a Seagate ST3500418AS but have been modified
48    to make more sense in QEMU */
49 static const int smart_attributes[][12] = {
50     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
51     /* raw read error rate*/
52     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
53     /* spin up */
54     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55     /* start stop count */
56     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
57     /* remapped sectors */
58     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
59     /* power on hours */
60     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
61     /* power cycle count */
62     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
63     /* airflow-temperature-celsius */
64     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
65 };
66 
67 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
68     [IDE_DMA_READ] = "DMA READ",
69     [IDE_DMA_WRITE] = "DMA WRITE",
70     [IDE_DMA_TRIM] = "DMA TRIM",
71     [IDE_DMA_ATAPI] = "DMA ATAPI"
72 };
73 
74 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
75 {
76     if ((unsigned)enval < IDE_DMA__COUNT) {
77         return IDE_DMA_CMD_lookup[enval];
78     }
79     return "DMA UNKNOWN CMD";
80 }
81 
82 static void ide_dummy_transfer_stop(IDEState *s);
83 
84 const MemoryRegionPortio ide_portio_list[] = {
85     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
86     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
87     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
88     PORTIO_END_OF_LIST(),
89 };
90 
91 const MemoryRegionPortio ide_portio2_list[] = {
92     { 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write },
93     PORTIO_END_OF_LIST(),
94 };
95 
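/*
 * Pad an ATA IDENTIFY string field: copy src into str and pad with
 * spaces up to len bytes.  ATA string fields store the two characters
 * of each 16-bit word in swapped order, hence the i^1 index.
 */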
96 static void padstr(char *str, const char *src, int len)
97 {
98     int i, v;
99     for (i = 0; i < len; i++) {
100         if (*src)
101             v = *src++;
102         else
103             v = ' ';
104         str[i ^ 1] = v;
105     }
106 }
107 
108 static void put_le16(uint16_t *p, unsigned int v)
109 {
110     *p = cpu_to_le16(v);
111 }
112 
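/*
 * Fill in the capacity words of the IDENTIFY data: words 60-61 hold
 * the LBA28 sector count (clamped to 2^28 - 1), words 100-103 the
 * full LBA48 sector count.
 */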
113 static void ide_identify_size(IDEState *s)
114 {
115     uint16_t *p = (uint16_t *)s->identify_data;
116     int64_t nb_sectors_lba28 = s->nb_sectors;
117     if (nb_sectors_lba28 >= 1 << 28) {
118         nb_sectors_lba28 = (1 << 28) - 1;
119     }
120     put_le16(p + 60, nb_sectors_lba28);
121     put_le16(p + 61, nb_sectors_lba28 >> 16);
122     put_le16(p + 100, s->nb_sectors);
123     put_le16(p + 101, s->nb_sectors >> 16);
124     put_le16(p + 102, s->nb_sectors >> 32);
125     put_le16(p + 103, s->nb_sectors >> 48);
126 }
127 
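/*
 * Build the 512-byte IDENTIFY DEVICE response for an ATA disk in
 * s->identify_data (256 little-endian words) and copy it to the I/O
 * buffer.  The response is generated once and cached; subsequent
 * calls only refresh the copy in s->io_buffer.
 */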
128 static void ide_identify(IDEState *s)
129 {
130     uint16_t *p;
131     unsigned int oldsize;
132     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
133 
134     p = (uint16_t *)s->identify_data;
135     if (s->identify_set) {
136         goto fill_buffer;
137     }
138     memset(p, 0, sizeof(s->identify_data));
139 
140     put_le16(p + 0, 0x0040);
141     put_le16(p + 1, s->cylinders);
142     put_le16(p + 3, s->heads);
143     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
144     put_le16(p + 5, 512); /* XXX: retired, remove ? */
145     put_le16(p + 6, s->sectors);
146     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
147     put_le16(p + 20, 3); /* XXX: retired, remove ? */
148     put_le16(p + 21, 512); /* cache size in sectors */
149     put_le16(p + 22, 4); /* ecc bytes */
150     padstr((char *)(p + 23), s->version, 8); /* firmware version */
151     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
152 #if MAX_MULT_SECTORS > 1
153     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
154 #endif
155     put_le16(p + 48, 1); /* dword I/O */
156     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
157     put_le16(p + 51, 0x200); /* PIO transfer cycle */
158     put_le16(p + 52, 0x200); /* DMA transfer cycle */
159     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
160     put_le16(p + 54, s->cylinders);
161     put_le16(p + 55, s->heads);
162     put_le16(p + 56, s->sectors);
163     oldsize = s->cylinders * s->heads * s->sectors;
164     put_le16(p + 57, oldsize);
165     put_le16(p + 58, oldsize >> 16);
166     if (s->mult_sectors)
167         put_le16(p + 59, 0x100 | s->mult_sectors);
168     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
169     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
170     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
171     put_le16(p + 63, 0x07); /* mdma0-2 supported */
172     put_le16(p + 64, 0x03); /* pio3-4 supported */
173     put_le16(p + 65, 120);
174     put_le16(p + 66, 120);
175     put_le16(p + 67, 120);
176     put_le16(p + 68, 120);
177     if (dev && dev->conf.discard_granularity) {
178         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
179     }
180 
181     if (s->ncq_queues) {
182         put_le16(p + 75, s->ncq_queues - 1);
183         /* NCQ supported */
184         put_le16(p + 76, (1 << 8));
185     }
186 
187     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
188     put_le16(p + 81, 0x16); /* conforms to ata5 */
189     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
190     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
191     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
192     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
193     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
194     if (s->wwn) {
195         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
196     } else {
197         put_le16(p + 84, (1 << 14) | 0);
198     }
199     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
200     if (blk_enable_write_cache(s->blk)) {
201         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
202     } else {
203         put_le16(p + 85, (1 << 14) | 1);
204     }
205     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
206     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
207     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
208     if (s->wwn) {
209         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
210     } else {
211         put_le16(p + 87, (1 << 14) | 0);
212     }
213     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
214     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
215     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
216     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
217     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
218     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
219 
220     if (dev && dev->conf.physical_block_size)
221         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
222     if (s->wwn) {
223         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
224         put_le16(p + 108, s->wwn >> 48);
225         put_le16(p + 109, s->wwn >> 32);
226         put_le16(p + 110, s->wwn >> 16);
227         put_le16(p + 111, s->wwn);
228     }
229     if (dev && dev->conf.discard_granularity) {
230         put_le16(p + 169, 1); /* TRIM support */
231     }
232     if (dev) {
233         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
234     }
235 
236     ide_identify_size(s);
237     s->identify_set = 1;
238 
239 fill_buffer:
240     memcpy(s->io_buffer, p, sizeof(s->identify_data));
241 }
242 
243 static void ide_atapi_identify(IDEState *s)
244 {
245     uint16_t *p;
246 
247     p = (uint16_t *)s->identify_data;
248     if (s->identify_set) {
249         goto fill_buffer;
250     }
251     memset(p, 0, sizeof(s->identify_data));
252 
253     /* Removable CDROM, 50us response, 12 byte packets */
254     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
255     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
256     put_le16(p + 20, 3); /* buffer type */
257     put_le16(p + 21, 512); /* cache size in sectors */
258     put_le16(p + 22, 4); /* ecc bytes */
259     padstr((char *)(p + 23), s->version, 8); /* firmware version */
260     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
261     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
262 #ifdef USE_DMA_CDROM
263     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
264     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
265     put_le16(p + 62, 7);  /* single word dma0-2 supported */
266     put_le16(p + 63, 7);  /* mdma0-2 supported */
267 #else
268     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
269     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
270     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
271 #endif
272     put_le16(p + 64, 3); /* pio3-4 supported */
273     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
274     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
275     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
276     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
277 
278     put_le16(p + 71, 30); /* in ns */
279     put_le16(p + 72, 30); /* in ns */
280 
281     if (s->ncq_queues) {
282         put_le16(p + 75, s->ncq_queues - 1);
283         /* NCQ supported */
284         put_le16(p + 76, (1 << 8));
285     }
286 
287     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
288     if (s->wwn) {
289         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
290         put_le16(p + 87, (1 << 8)); /* WWN enabled */
291     }
292 
293 #ifdef USE_DMA_CDROM
294     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
295 #endif
296 
297     if (s->wwn) {
298         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
299         put_le16(p + 108, s->wwn >> 48);
300         put_le16(p + 109, s->wwn >> 32);
301         put_le16(p + 110, s->wwn >> 16);
302         put_le16(p + 111, s->wwn);
303     }
304 
305     s->identify_set = 1;
306 
307 fill_buffer:
308     memcpy(s->io_buffer, p, sizeof(s->identify_data));
309 }
310 
311 static void ide_cfata_identify_size(IDEState *s)
312 {
313     uint16_t *p = (uint16_t *)s->identify_data;
314     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
315     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
316     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
317     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
318 }
319 
320 static void ide_cfata_identify(IDEState *s)
321 {
322     uint16_t *p;
323     uint32_t cur_sec;
324 
325     p = (uint16_t *)s->identify_data;
326     if (s->identify_set) {
327         goto fill_buffer;
328     }
329     memset(p, 0, sizeof(s->identify_data));
330 
331     cur_sec = s->cylinders * s->heads * s->sectors;
332 
333     put_le16(p + 0, 0x848a);                    /* CF Storage Card signature */
334     put_le16(p + 1, s->cylinders);              /* Default cylinders */
335     put_le16(p + 3, s->heads);                  /* Default heads */
336     put_le16(p + 6, s->sectors);                /* Default sectors per track */
337     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
338     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
339     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
340     put_le16(p + 22, 0x0004);                   /* ECC bytes */
341     padstr((char *) (p + 23), s->version, 8);   /* Firmware Revision */
342     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
343 #if MAX_MULT_SECTORS > 1
344     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
345 #else
346     put_le16(p + 47, 0x0000);
347 #endif
348     put_le16(p + 49, 0x0f00);                   /* Capabilities */
349     put_le16(p + 51, 0x0002);                   /* PIO cycle timing mode */
350     put_le16(p + 52, 0x0001);                   /* DMA cycle timing mode */
351     put_le16(p + 53, 0x0003);                   /* Translation params valid */
352     put_le16(p + 54, s->cylinders);             /* Current cylinders */
353     put_le16(p + 55, s->heads);                 /* Current heads */
354     put_le16(p + 56, s->sectors);               /* Current sectors */
355     put_le16(p + 57, cur_sec);                  /* Current capacity */
356     put_le16(p + 58, cur_sec >> 16);            /* Current capacity */
357     if (s->mult_sectors)                        /* Multiple sector setting */
358         put_le16(p + 59, 0x100 | s->mult_sectors);
359     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
360     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
361     put_le16(p + 63, 0x0203);                   /* Multiword DMA capability */
362     put_le16(p + 64, 0x0001);                   /* Flow Control PIO support */
363     put_le16(p + 65, 0x0096);                   /* Min. Multiword DMA cycle */
364     put_le16(p + 66, 0x0096);                   /* Rec. Multiword DMA cycle */
365     put_le16(p + 68, 0x00b4);                   /* Min. PIO cycle time */
366     put_le16(p + 82, 0x400c);                   /* Command Set supported */
367     put_le16(p + 83, 0x7068);                   /* Command Set supported */
368     put_le16(p + 84, 0x4000);                   /* Features supported */
369     put_le16(p + 85, 0x000c);                   /* Command Set enabled */
370     put_le16(p + 86, 0x7044);                   /* Command Set enabled */
371     put_le16(p + 87, 0x4000);                   /* Features enabled */
372     put_le16(p + 91, 0x4060);                   /* Current APM level */
373     put_le16(p + 129, 0x0002);                  /* Current features option */
374     put_le16(p + 130, 0x0005);                  /* Reassigned sectors */
375     put_le16(p + 131, 0x0001);                  /* Initial power mode */
376     put_le16(p + 132, 0x0000);                  /* User signature */
377     put_le16(p + 160, 0x8100);                  /* Power requirement */
378     put_le16(p + 161, 0x8001);                  /* CF command set */
379 
380     ide_cfata_identify_size(s);
381     s->identify_set = 1;
382 
383 fill_buffer:
384     memcpy(s->io_buffer, p, sizeof(s->identify_data));
385 }
386 
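/*
 * Place the ATA device signature in the task-file registers: an ATAPI
 * (PACKET) device reports lcyl/hcyl = 0x14/0xeb, an ATA disk reports
 * 0x00/0x00, and an empty slot reports 0xff/0xff.
 */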
387 static void ide_set_signature(IDEState *s)
388 {
389     s->select &= ~(ATA_DEV_HS); /* clear head */
390     /* put signature */
391     s->nsector = 1;
392     s->sector = 1;
393     if (s->drive_kind == IDE_CD) {
394         s->lcyl = 0x14;
395         s->hcyl = 0xeb;
396     } else if (s->blk) {
397         s->lcyl = 0;
398         s->hcyl = 0;
399     } else {
400         s->lcyl = 0xff;
401         s->hcyl = 0xff;
402     }
403 }
404 
405 static bool ide_sect_range_ok(IDEState *s,
406                               uint64_t sector, uint64_t nb_sectors)
407 {
408     uint64_t total_sectors;
409 
410     blk_get_geometry(s->blk, &total_sectors);
411     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
412         return false;
413     }
414     return true;
415 }
416 
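/*
 * State for an emulated DATA SET MANAGEMENT (TRIM) request.  The guest
 * supplies a list of 8-byte entries (48-bit LBA plus 16-bit sector
 * count); j indexes the current iovec and i the current entry within
 * it as ide_issue_trim_cb walks the list one discard at a time.
 */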
417 typedef struct TrimAIOCB {
418     BlockAIOCB common;
419     IDEState *s;
420     QEMUBH *bh;
421     int ret;
422     QEMUIOVector *qiov;
423     BlockAIOCB *aiocb;
424     int i, j;
425 } TrimAIOCB;
426 
427 static void trim_aio_cancel(BlockAIOCB *acb)
428 {
429     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
430 
431     /* Point the indices at the last entry so ide_issue_trim_cb will not continue */
432     iocb->j = iocb->qiov->niov - 1;
433     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
434 
435     iocb->ret = -ECANCELED;
436 
437     if (iocb->aiocb) {
438         blk_aio_cancel_async(iocb->aiocb);
439         iocb->aiocb = NULL;
440     }
441 }
442 
443 static const AIOCBInfo trim_aiocb_info = {
444     .aiocb_size         = sizeof(TrimAIOCB),
445     .cancel_async       = trim_aio_cancel,
446 };
447 
448 static void ide_trim_bh_cb(void *opaque)
449 {
450     TrimAIOCB *iocb = opaque;
451     BlockBackend *blk = iocb->s->blk;
452 
453     iocb->common.cb(iocb->common.opaque, iocb->ret);
454 
455     qemu_bh_delete(iocb->bh);
456     iocb->bh = NULL;
457     qemu_aio_unref(iocb);
458 
459     /* Paired with an increment in ide_issue_trim() */
460     blk_dec_in_flight(blk);
461 }
462 
463 static void ide_issue_trim_cb(void *opaque, int ret)
464 {
465     TrimAIOCB *iocb = opaque;
466     IDEState *s = iocb->s;
467 
468     if (iocb->i >= 0) {
469         if (ret >= 0) {
470             block_acct_done(blk_get_stats(s->blk), &s->acct);
471         } else {
472             block_acct_failed(blk_get_stats(s->blk), &s->acct);
473         }
474     }
475 
476     if (ret >= 0) {
477         while (iocb->j < iocb->qiov->niov) {
478             int j = iocb->j;
479             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
480                 int i = iocb->i;
481                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
482 
483                 /* 6-byte LBA + 2-byte range per entry */
484                 uint64_t entry = le64_to_cpu(buffer[i]);
485                 uint64_t sector = entry & 0x0000ffffffffffffULL;
486                 uint16_t count = entry >> 48;
487 
488                 if (count == 0) {
489                     continue;
490                 }
491 
492                 if (!ide_sect_range_ok(s, sector, count)) {
493                     block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
494                     iocb->ret = -EINVAL;
495                     goto done;
496                 }
497 
498                 block_acct_start(blk_get_stats(s->blk), &s->acct,
499                                  count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
500 
501                 /* Got an entry! Submit and exit.  */
502                 iocb->aiocb = blk_aio_pdiscard(s->blk,
503                                                sector << BDRV_SECTOR_BITS,
504                                                count << BDRV_SECTOR_BITS,
505                                                ide_issue_trim_cb, opaque);
506                 return;
507             }
508 
509             iocb->j++;
510             iocb->i = -1;
511         }
512     } else {
513         iocb->ret = ret;
514     }
515 
516 done:
517     iocb->aiocb = NULL;
518     if (iocb->bh) {
519         replay_bh_schedule_event(iocb->bh);
520     }
521 }
522 
523 BlockAIOCB *ide_issue_trim(
524         int64_t offset, QEMUIOVector *qiov,
525         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
526 {
527     IDEState *s = opaque;
528     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
529     TrimAIOCB *iocb;
530 
531     /* Paired with a decrement in ide_trim_bh_cb() */
532     blk_inc_in_flight(s->blk);
533 
534     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
535     iocb->s = s;
536     iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb,
537                                    &DEVICE(dev)->mem_reentrancy_guard);
538     iocb->ret = 0;
539     iocb->qiov = qiov;
540     iocb->i = -1;
541     iocb->j = 0;
542     ide_issue_trim_cb(iocb, 0);
543     return &iocb->common;
544 }
545 
546 void ide_abort_command(IDEState *s)
547 {
548     s->status = READY_STAT | ERR_STAT;
549     s->error = ABRT_ERR;
550     ide_transfer_stop(s);
551 }
552 
553 static void ide_set_retry(IDEState *s)
554 {
555     s->bus->retry_unit = s->unit;
556     s->bus->retry_sector_num = ide_get_sector(s);
557     s->bus->retry_nsector = s->nsector;
558 }
559 
560 static void ide_clear_retry(IDEState *s)
561 {
562     s->bus->retry_unit = -1;
563     s->bus->retry_sector_num = 0;
564     s->bus->retry_nsector = 0;
565 }
566 
567 /* prepare a data transfer and register the handler to call when it completes */
568 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
569                                   EndTransferFunc *end_transfer_func)
570 {
571     s->data_ptr = buf;
572     s->data_end = buf + size;
573     ide_set_retry(s);
574     if (!(s->status & ERR_STAT)) {
575         s->status |= DRQ_STAT;
576     }
577     if (!s->bus->dma->ops->pio_transfer) {
578         s->end_transfer_func = end_transfer_func;
579         return false;
580     }
581     s->bus->dma->ops->pio_transfer(s->bus->dma);
582     return true;
583 }
584 
585 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
586                         EndTransferFunc *end_transfer_func)
587 {
588     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
589         end_transfer_func(s);
590     }
591 }
592 
593 static void ide_cmd_done(IDEState *s)
594 {
595     if (s->bus->dma->ops->cmd_done) {
596         s->bus->dma->ops->cmd_done(s->bus->dma);
597     }
598 }
599 
600 static void ide_transfer_halt(IDEState *s)
601 {
602     s->end_transfer_func = ide_transfer_stop;
603     s->data_ptr = s->io_buffer;
604     s->data_end = s->io_buffer;
605     s->status &= ~DRQ_STAT;
606 }
607 
608 void ide_transfer_stop(IDEState *s)
609 {
610     ide_transfer_halt(s);
611     ide_cmd_done(s);
612 }
613 
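/*
 * Decode the current sector address from the task-file registers,
 * using LBA48, LBA28 or CHS addressing depending on the select
 * register and the lba48 flag.
 */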
614 int64_t ide_get_sector(IDEState *s)
615 {
616     int64_t sector_num;
617     if (s->select & (ATA_DEV_LBA)) {
618         if (s->lba48) {
619             sector_num = ((int64_t)s->hob_hcyl << 40) |
620                 ((int64_t) s->hob_lcyl << 32) |
621                 ((int64_t) s->hob_sector << 24) |
622                 ((int64_t) s->hcyl << 16) |
623                 ((int64_t) s->lcyl << 8) | s->sector;
624         } else {
625             /* LBA28 */
626             sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
627                 (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
628         }
629     } else {
630         /* CHS */
631         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
632             (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
633     }
634 
635     return sector_num;
636 }
637 
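/* Encode sector_num back into the task-file registers (inverse of
 * ide_get_sector). */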
638 void ide_set_sector(IDEState *s, int64_t sector_num)
639 {
640     unsigned int cyl, r;
641     if (s->select & (ATA_DEV_LBA)) {
642         if (s->lba48) {
643             s->sector = sector_num;
644             s->lcyl = sector_num >> 8;
645             s->hcyl = sector_num >> 16;
646             s->hob_sector = sector_num >> 24;
647             s->hob_lcyl = sector_num >> 32;
648             s->hob_hcyl = sector_num >> 40;
649         } else {
650             /* LBA28 */
651             s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
652                 ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
653             s->hcyl = (sector_num >> 16);
654             s->lcyl = (sector_num >> 8);
655             s->sector = (sector_num);
656         }
657     } else {
658         /* CHS */
659         cyl = sector_num / (s->heads * s->sectors);
660         r = sector_num % (s->heads * s->sectors);
661         s->hcyl = cyl >> 8;
662         s->lcyl = cyl;
663         s->select = (s->select & ~(ATA_DEV_HS)) |
664             ((r / s->sectors) & (ATA_DEV_HS));
665         s->sector = (r % s->sectors) + 1;
666     }
667 }
668 
669 static void ide_rw_error(IDEState *s) {
670     ide_abort_command(s);
671     ide_bus_set_irq(s->bus);
672 }
673 
674 static void ide_buffered_readv_cb(void *opaque, int ret)
675 {
676     IDEBufferedRequest *req = opaque;
677     if (!req->orphaned) {
678         if (!ret) {
679             assert(req->qiov.size == req->original_qiov->size);
680             qemu_iovec_from_buf(req->original_qiov, 0,
681                                 req->qiov.local_iov.iov_base,
682                                 req->original_qiov->size);
683         }
684         req->original_cb(req->original_opaque, ret);
685     }
686     QLIST_REMOVE(req, list);
687     qemu_vfree(qemu_iovec_buf(&req->qiov));
688     g_free(req);
689 }
690 
691 #define MAX_BUFFERED_REQS 16
692 
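/*
 * Read into a bounce buffer instead of the caller's iovec.  If the
 * request is later orphaned by ide_cancel_dma_sync(), completion only
 * frees the bounce buffer; the guest-visible buffer is never written
 * after cancellation.
 */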
693 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
694                                QEMUIOVector *iov, int nb_sectors,
695                                BlockCompletionFunc *cb, void *opaque)
696 {
697     BlockAIOCB *aioreq;
698     IDEBufferedRequest *req;
699     int c = 0;
700 
701     QLIST_FOREACH(req, &s->buffered_requests, list) {
702         c++;
703     }
704     if (c > MAX_BUFFERED_REQS) {
705         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
706     }
707 
708     req = g_new0(IDEBufferedRequest, 1);
709     req->original_qiov = iov;
710     req->original_cb = cb;
711     req->original_opaque = opaque;
712     qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
713                         iov->size);
714 
715     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
716                             &req->qiov, 0, ide_buffered_readv_cb, req);
717 
718     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
719     return aioreq;
720 }
721 
722 /**
723  * Cancel all pending DMA requests.
724  * Any buffered DMA requests are instantly canceled,
725  * but any pending unbuffered DMA requests must be waited on.
726  */
727 void ide_cancel_dma_sync(IDEState *s)
728 {
729     IDEBufferedRequest *req;
730 
731     /* First invoke the callbacks of all buffered requests
732      * and flag those requests as orphaned. Ideally there
733      * are no unbuffered (scatter-gather DMA or write)
734      * requests pending and we can avoid draining. */
735     QLIST_FOREACH(req, &s->buffered_requests, list) {
736         if (!req->orphaned) {
737             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
738             req->original_cb(req->original_opaque, -ECANCELED);
739         }
740         req->orphaned = true;
741     }
742 
743     /*
744      * We can't cancel a scatter-gather DMA in the middle of the
745      * operation or a partial (not full) DMA transfer would reach
746      * the storage, so we wait for completion instead (we behave
747      * as if the DMA had already completed by the time the guest
748      * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
749      * not set).
750      *
751      * In the future we'll be able to safely cancel the I/O once
752      * the whole DMA operation is submitted to disk with a single
753      * aio operation using preadv/pwritev.
754      */
755     if (s->bus->dma->aiocb) {
756         trace_ide_cancel_dma_sync_remaining();
757         blk_drain(s->blk);
758         assert(s->bus->dma->aiocb == NULL);
759     }
760 }
761 
762 static void ide_sector_read(IDEState *s);
763 
764 static void ide_sector_read_cb(void *opaque, int ret)
765 {
766     IDEState *s = opaque;
767     int n;
768 
769     s->pio_aiocb = NULL;
770     s->status &= ~BUSY_STAT;
771 
772     if (ret != 0) {
773         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
774                                 IDE_RETRY_READ)) {
775             return;
776         }
777     }
778 
779     block_acct_done(blk_get_stats(s->blk), &s->acct);
780 
781     n = s->nsector;
782     if (n > s->req_nb_sectors) {
783         n = s->req_nb_sectors;
784     }
785 
786     ide_set_sector(s, ide_get_sector(s) + n);
787     s->nsector -= n;
788     /* Allow the guest to read the io_buffer */
789     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
790     ide_bus_set_irq(s->bus);
791 }
792 
793 static void ide_sector_read(IDEState *s)
794 {
795     int64_t sector_num;
796     int n;
797 
798     s->status = READY_STAT | SEEK_STAT;
799     s->error = 0; /* not needed by IDE spec, but needed by Windows */
800     sector_num = ide_get_sector(s);
801     n = s->nsector;
802 
803     if (n == 0) {
804         ide_transfer_stop(s);
805         return;
806     }
807 
808     s->status |= BUSY_STAT;
809 
810     if (n > s->req_nb_sectors) {
811         n = s->req_nb_sectors;
812     }
813 
814     trace_ide_sector_read(sector_num, n);
815 
816     if (!ide_sect_range_ok(s, sector_num, n)) {
817         ide_rw_error(s);
818         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
819         return;
820     }
821 
822     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
823 
824     block_acct_start(blk_get_stats(s->blk), &s->acct,
825                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
826     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
827                                       ide_sector_read_cb, s);
828 }
829 
830 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
831 {
832     if (s->bus->dma->ops->commit_buf) {
833         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
834     }
835     s->io_buffer_offset += tx_bytes;
836     qemu_sglist_destroy(&s->sg);
837 }
838 
839 void ide_set_inactive(IDEState *s, bool more)
840 {
841     s->bus->dma->aiocb = NULL;
842     ide_clear_retry(s);
843     if (s->bus->dma->ops->set_inactive) {
844         s->bus->dma->ops->set_inactive(s->bus->dma, more);
845     }
846     ide_cmd_done(s);
847 }
848 
849 void ide_dma_error(IDEState *s)
850 {
851     dma_buf_commit(s, 0);
852     ide_abort_command(s);
853     ide_set_inactive(s, false);
854     ide_bus_set_irq(s->bus);
855 }
856 
857 int ide_handle_rw_error(IDEState *s, int error, int op)
858 {
859     bool is_read = (op & IDE_RETRY_READ) != 0;
860     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
861 
862     if (action == BLOCK_ERROR_ACTION_STOP) {
863         assert(s->bus->retry_unit == s->unit);
864         s->bus->error_status = op;
865     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
866         block_acct_failed(blk_get_stats(s->blk), &s->acct);
867         if (IS_IDE_RETRY_DMA(op)) {
868             ide_dma_error(s);
869         } else if (IS_IDE_RETRY_ATAPI(op)) {
870             ide_atapi_io_error(s, -error);
871         } else {
872             ide_rw_error(s);
873         }
874     }
875     blk_error_action(s->blk, action, is_read, error);
876     return action != BLOCK_ERROR_ACTION_IGNORE;
877 }
878 
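/*
 * Completion callback for ATA DMA transfers.  Commits the bytes
 * described by the scatter-gather list, advances the current sector,
 * and either finishes the command or re-arms itself to transfer the
 * remaining sectors with a fresh PRD table.
 */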
879 static void ide_dma_cb(void *opaque, int ret)
880 {
881     IDEState *s = opaque;
882     int n;
883     int64_t sector_num;
884     uint64_t offset;
885     bool stay_active = false;
886     int32_t prep_size = 0;
887 
888     if (ret == -EINVAL) {
889         ide_dma_error(s);
890         return;
891     }
892 
893     if (ret < 0) {
894         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
895             s->bus->dma->aiocb = NULL;
896             dma_buf_commit(s, 0);
897             return;
898         }
899     }
900 
901     if (s->io_buffer_size > s->nsector * 512) {
902         /*
903          * The PRDs were longer than needed for this request.
904          * The Active bit must remain set after the request completes.
905          */
906         n = s->nsector;
907         stay_active = true;
908     } else {
909         n = s->io_buffer_size >> 9;
910     }
911 
912     sector_num = ide_get_sector(s);
913     if (n > 0) {
914         assert(n * 512 == s->sg.size);
915         dma_buf_commit(s, s->sg.size);
916         sector_num += n;
917         ide_set_sector(s, sector_num);
918         s->nsector -= n;
919     }
920 
921     /* end of transfer ? */
922     if (s->nsector == 0) {
923         s->status = READY_STAT | SEEK_STAT;
924         ide_bus_set_irq(s->bus);
925         goto eot;
926     }
927 
928     /* launch next transfer */
929     n = s->nsector;
930     s->io_buffer_index = 0;
931     s->io_buffer_size = n * 512;
932     prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
933     /* prepare_buf() must succeed and respect the limit */
934     assert(prep_size >= 0 && prep_size <= n * 512);
935 
936     /*
937      * Now prep_size stores the number of bytes in the sglist, and
938      * s->io_buffer_size stores the number of bytes described by the PRDs.
939      */
940 
941     if (prep_size < n * 512) {
942         /*
943          * The PRDs are too short for this request. Error condition!
944          * Reset the Active bit and don't raise the interrupt.
945          */
946         s->status = READY_STAT | SEEK_STAT;
947         dma_buf_commit(s, 0);
948         goto eot;
949     }
950 
951     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
952 
953     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
954         !ide_sect_range_ok(s, sector_num, n)) {
955         ide_dma_error(s);
956         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
957         return;
958     }
959 
960     offset = sector_num << BDRV_SECTOR_BITS;
961     switch (s->dma_cmd) {
962     case IDE_DMA_READ:
963         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
964                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
965         break;
966     case IDE_DMA_WRITE:
967         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
968                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
969         break;
970     case IDE_DMA_TRIM:
971         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
972                                         &s->sg, offset, BDRV_SECTOR_SIZE,
973                                         ide_issue_trim, s, ide_dma_cb, s,
974                                         DMA_DIRECTION_TO_DEVICE);
975         break;
976     default:
977         abort();
978     }
979     return;
980 
981 eot:
982     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
983         block_acct_done(blk_get_stats(s->blk), &s->acct);
984     }
985     ide_set_inactive(s, stay_active);
986 }
987 
988 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
989 {
990     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
991     s->io_buffer_size = 0;
992     s->dma_cmd = dma_cmd;
993 
994     switch (dma_cmd) {
995     case IDE_DMA_READ:
996         block_acct_start(blk_get_stats(s->blk), &s->acct,
997                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
998         break;
999     case IDE_DMA_WRITE:
1000         block_acct_start(blk_get_stats(s->blk), &s->acct,
1001                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1002         break;
1003     default:
1004         break;
1005     }
1006 
1007     ide_start_dma(s, ide_dma_cb);
1008 }
1009 
1010 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
1011 {
1012     s->io_buffer_index = 0;
1013     ide_set_retry(s);
1014     if (s->bus->dma->ops->start_dma) {
1015         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
1016     }
1017 }
1018 
1019 static void ide_sector_write(IDEState *s);
1020 
1021 static void ide_sector_write_timer_cb(void *opaque)
1022 {
1023     IDEState *s = opaque;
1024     ide_bus_set_irq(s->bus);
1025 }
1026 
1027 static void ide_sector_write_cb(void *opaque, int ret)
1028 {
1029     IDEState *s = opaque;
1030     int n;
1031 
1032     s->pio_aiocb = NULL;
1033     s->status &= ~BUSY_STAT;
1034 
1035     if (ret != 0) {
1036         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
1037             return;
1038         }
1039     }
1040 
1041     block_acct_done(blk_get_stats(s->blk), &s->acct);
1042 
1043     n = s->nsector;
1044     if (n > s->req_nb_sectors) {
1045         n = s->req_nb_sectors;
1046     }
1047     s->nsector -= n;
1048 
1049     ide_set_sector(s, ide_get_sector(s) + n);
1050     if (s->nsector == 0) {
1051         /* no more sectors to write */
1052         ide_transfer_stop(s);
1053     } else {
1054         int n1 = s->nsector;
1055         if (n1 > s->req_nb_sectors) {
1056             n1 = s->req_nb_sectors;
1057         }
1058         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1059                            ide_sector_write);
1060     }
1061 
1062     if (s->win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1063         /* It seems there is a bug in the Windows 2000 installer HDD
1064            IDE driver which fills the disk with empty logs when the
1065            IDE write IRQ comes too early. This hack tries to correct
1066            that at the expense of slower write performance. Use this
1067            option _only_ to install Windows 2000. You must disable it
1068            for normal use. */
1069         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1070                   (NANOSECONDS_PER_SECOND / 1000));
1071     } else {
1072         ide_bus_set_irq(s->bus);
1073     }
1074 }
1075 
1076 static void ide_sector_write(IDEState *s)
1077 {
1078     int64_t sector_num;
1079     int n;
1080 
1081     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1082     sector_num = ide_get_sector(s);
1083 
1084     n = s->nsector;
1085     if (n > s->req_nb_sectors) {
1086         n = s->req_nb_sectors;
1087     }
1088 
1089     trace_ide_sector_write(sector_num, n);
1090 
1091     if (!ide_sect_range_ok(s, sector_num, n)) {
1092         ide_rw_error(s);
1093         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1094         return;
1095     }
1096 
1097     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1098 
1099     block_acct_start(blk_get_stats(s->blk), &s->acct,
1100                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1101     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1102                                    &s->qiov, 0, ide_sector_write_cb, s);
1103 }
1104 
1105 static void ide_flush_cb(void *opaque, int ret)
1106 {
1107     IDEState *s = opaque;
1108 
1109     s->pio_aiocb = NULL;
1110 
1111     if (ret < 0) {
1112         /* XXX: What sector number to set here? */
1113         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1114             return;
1115         }
1116     }
1117 
1118     if (s->blk) {
1119         block_acct_done(blk_get_stats(s->blk), &s->acct);
1120     }
1121     s->status = READY_STAT | SEEK_STAT;
1122     ide_cmd_done(s);
1123     ide_bus_set_irq(s->bus);
1124 }
1125 
1126 static void ide_flush_cache(IDEState *s)
1127 {
1128     if (s->blk == NULL) {
1129         ide_flush_cb(s, 0);
1130         return;
1131     }
1132 
1133     s->status |= BUSY_STAT;
1134     ide_set_retry(s);
1135     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1136     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1137 }
1138 
1139 static void ide_cfata_metadata_inquiry(IDEState *s)
1140 {
1141     uint16_t *p;
1142     uint32_t spd;
1143 
1144     p = (uint16_t *) s->io_buffer;
1145     memset(p, 0, 0x200);
1146     spd = ((s->mdata_size - 1) >> 9) + 1;
1147 
1148     put_le16(p + 0, 0x0001);                    /* Data format revision */
1149     put_le16(p + 1, 0x0000);                    /* Media property: silicon */
1150     put_le16(p + 2, s->media_changed);          /* Media status */
1151     put_le16(p + 3, s->mdata_size & 0xffff);    /* Capacity in bytes (low) */
1152     put_le16(p + 4, s->mdata_size >> 16);       /* Capacity in bytes (high) */
1153     put_le16(p + 5, spd & 0xffff);              /* Sectors per device (low) */
1154     put_le16(p + 6, spd >> 16);                 /* Sectors per device (high) */
1155 }
1156 
1157 static void ide_cfata_metadata_read(IDEState *s)
1158 {
1159     uint16_t *p;
1160 
1161     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1162         s->status = ERR_STAT;
1163         s->error = ABRT_ERR;
1164         return;
1165     }
1166 
1167     p = (uint16_t *) s->io_buffer;
1168     memset(p, 0, 0x200);
1169 
1170     put_le16(p + 0, s->media_changed);          /* Media status */
1171     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1172                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1173                                     s->nsector << 9), 0x200 - 2));
1174 }
1175 
1176 static void ide_cfata_metadata_write(IDEState *s)
1177 {
1178     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1179         s->status = ERR_STAT;
1180         s->error = ABRT_ERR;
1181         return;
1182     }
1183 
1184     s->media_changed = 0;
1185 
1186     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1187                     s->io_buffer + 2,
1188                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1189                                     s->nsector << 9), 0x200 - 2));
1190 }
1191 
1192 /* called when the inserted state of the media has changed */
1193 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1194 {
1195     IDEState *s = opaque;
1196     uint64_t nb_sectors;
1197 
1198     s->tray_open = !load;
1199     blk_get_geometry(s->blk, &nb_sectors);
1200     s->nb_sectors = nb_sectors;
1201 
1202     /*
1203      * First indicate to the guest that a CD has been removed.  That's
1204      * done on the next command the guest sends us.
1205      *
1206      * Then we set UNIT_ATTENTION, by which the guest will
1207      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1208      */
1209     s->cdrom_changed = 1;
1210     s->events.new_media = true;
1211     s->events.eject_request = false;
1212     ide_bus_set_irq(s->bus);
1213 }
1214 
1215 static void ide_cd_eject_request_cb(void *opaque, bool force)
1216 {
1217     IDEState *s = opaque;
1218 
1219     s->events.eject_request = true;
1220     if (force) {
1221         s->tray_locked = false;
1222     }
1223     ide_bus_set_irq(s->bus);
1224 }
1225 
1226 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1227 {
1228     s->lba48 = lba48;
1229 
1230     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1231      * fiddling with the rest of the read logic, we just store the
1232      * full sector count in ->nsector and ignore ->hob_nsector from
1233      * now on. */
1234     if (!s->lba48) {
1235         if (!s->nsector)
1236             s->nsector = 256;
1237     } else {
1238         if (!s->nsector && !s->hob_nsector)
1239             s->nsector = 65536;
1240         else {
1241             int lo = s->nsector;
1242             int hi = s->hob_nsector;
1243 
1244             s->nsector = (hi << 8) | lo;
1245         }
1246     }
1247 }
1248 
1249 static void ide_clear_hob(IDEBus *bus)
1250 {
1251     /* any write to a command block register clears the HOB bit of the device control register */
1252     bus->cmd &= ~(IDE_CTRL_HOB);
1253 }
1254 
1255 /* IOport [W]rite [R]egisters */
1256 enum ATA_IOPORT_WR {
1257     ATA_IOPORT_WR_DATA = 0,
1258     ATA_IOPORT_WR_FEATURES = 1,
1259     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1260     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1261     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1262     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1263     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1264     ATA_IOPORT_WR_COMMAND = 7,
1265     ATA_IOPORT_WR_NUM_REGISTERS,
1266 };
1267 
1268 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1269     [ATA_IOPORT_WR_DATA] = "Data",
1270     [ATA_IOPORT_WR_FEATURES] = "Features",
1271     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1272     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1273     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1274     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1275     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1276     [ATA_IOPORT_WR_COMMAND] = "Command"
1277 };
1278 
1279 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1280 {
1281     IDEBus *bus = opaque;
1282     IDEState *s = ide_bus_active_if(bus);
1283     int reg_num = addr & 7;
1284 
1285     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1286 
1287     /* ignore writes to command block while busy with previous command */
1288     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1289         return;
1290     }
1291 
1292     /* NOTE: Device0 and Device1 both receive incoming register writes.
1293      * (They're on the same bus! They have to!) */
1294 
1295     switch (reg_num) {
1296     case 0:
1297         break;
1298     case ATA_IOPORT_WR_FEATURES:
1299         ide_clear_hob(bus);
1300         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1301         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1302         bus->ifs[0].feature = val;
1303         bus->ifs[1].feature = val;
1304         break;
1305     case ATA_IOPORT_WR_SECTOR_COUNT:
1306         ide_clear_hob(bus);
1307         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1308         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1309         bus->ifs[0].nsector = val;
1310         bus->ifs[1].nsector = val;
1311         break;
1312     case ATA_IOPORT_WR_SECTOR_NUMBER:
1313         ide_clear_hob(bus);
1314         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1315         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1316         bus->ifs[0].sector = val;
1317         bus->ifs[1].sector = val;
1318         break;
1319     case ATA_IOPORT_WR_CYLINDER_LOW:
1320         ide_clear_hob(bus);
1321         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1322         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1323         bus->ifs[0].lcyl = val;
1324         bus->ifs[1].lcyl = val;
1325         break;
1326     case ATA_IOPORT_WR_CYLINDER_HIGH:
1327         ide_clear_hob(bus);
1328         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1329         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1330         bus->ifs[0].hcyl = val;
1331         bus->ifs[1].hcyl = val;
1332         break;
1333     case ATA_IOPORT_WR_DEVICE_HEAD:
1334         ide_clear_hob(bus);
1335         bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1336         bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1337         /* select drive */
1338         bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1339         break;
1340     default:
1341     case ATA_IOPORT_WR_COMMAND:
1342         ide_clear_hob(bus);
1343         qemu_irq_lower(bus->irq);
1344         ide_bus_exec_cmd(bus, val);
1345         break;
1346     }
1347 }
1348 
1349 static void ide_reset(IDEState *s)
1350 {
1351     trace_ide_reset(s);
1352 
1353     if (s->pio_aiocb) {
1354         blk_aio_cancel(s->pio_aiocb);
1355         s->pio_aiocb = NULL;
1356     }
1357 
1358     if (s->reset_reverts) {
1359         s->reset_reverts = false;
1360         s->heads         = s->drive_heads;
1361         s->sectors       = s->drive_sectors;
1362     }
1363     if (s->drive_kind == IDE_CFATA)
1364         s->mult_sectors = 0;
1365     else
1366         s->mult_sectors = MAX_MULT_SECTORS;
1367     /* ide regs */
1368     s->feature = 0;
1369     s->error = 0;
1370     s->nsector = 0;
1371     s->sector = 0;
1372     s->lcyl = 0;
1373     s->hcyl = 0;
1374 
1375     /* lba48 */
1376     s->hob_feature = 0;
1377     s->hob_sector = 0;
1378     s->hob_nsector = 0;
1379     s->hob_lcyl = 0;
1380     s->hob_hcyl = 0;
1381 
1382     s->select = (ATA_DEV_ALWAYS_ON);
1383     s->status = READY_STAT | SEEK_STAT;
1384 
1385     s->lba48 = 0;
1386 
1387     /* ATAPI specific */
1388     s->sense_key = 0;
1389     s->asc = 0;
1390     s->cdrom_changed = 0;
1391     s->packet_transfer_size = 0;
1392     s->elementary_transfer_size = 0;
1393     s->io_buffer_index = 0;
1394     s->cd_sector_size = 0;
1395     s->atapi_dma = 0;
1396     s->tray_locked = 0;
1397     s->tray_open = 0;
1398     /* ATA DMA state */
1399     s->io_buffer_size = 0;
1400     s->req_nb_sectors = 0;
1401 
1402     ide_set_signature(s);
1403     /* init the transfer handler so that 0xffff is returned on data
1404        accesses */
1405     s->end_transfer_func = ide_dummy_transfer_stop;
1406     ide_dummy_transfer_stop(s);
1407     s->media_changed = 0;
1408 }
1409 
1410 static bool cmd_nop(IDEState *s, uint8_t cmd)
1411 {
1412     return true;
1413 }
1414 
1415 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1416 {
1417     /* Halt PIO (in the DRQ phase), then DMA */
1418     ide_transfer_halt(s);
1419     ide_cancel_dma_sync(s);
1420 
1421     /* Reset any PIO commands, reset signature, etc */
1422     ide_reset(s);
1423 
1424     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1425      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1426     s->status = 0x00;
1427 
1428     /* Do not overwrite status register */
1429     return false;
1430 }
1431 
1432 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1433 {
1434     switch (s->feature) {
1435     case DSM_TRIM:
1436         if (s->blk) {
1437             ide_sector_start_dma(s, IDE_DMA_TRIM);
1438             return false;
1439         }
1440         break;
1441     }
1442 
1443     ide_abort_command(s);
1444     return true;
1445 }
1446 
1447 static bool cmd_identify(IDEState *s, uint8_t cmd)
1448 {
1449     if (s->blk && s->drive_kind != IDE_CD) {
1450         if (s->drive_kind != IDE_CFATA) {
1451             ide_identify(s);
1452         } else {
1453             ide_cfata_identify(s);
1454         }
1455         s->status = READY_STAT | SEEK_STAT;
1456         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1457         ide_bus_set_irq(s->bus);
1458         return false;
1459     } else {
1460         if (s->drive_kind == IDE_CD) {
1461             ide_set_signature(s);
1462         }
1463         ide_abort_command(s);
1464     }
1465 
1466     return true;
1467 }
1468 
1469 static bool cmd_verify(IDEState *s, uint8_t cmd)
1470 {
1471     bool lba48 = (cmd == WIN_VERIFY_EXT);
1472 
1473     /* do sector number check ? */
1474     ide_cmd_lba48_transform(s, lba48);
1475 
1476     return true;
1477 }
1478 
1479 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1480 {
1481     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1482         /* Disable Read and Write Multiple */
1483         s->mult_sectors = 0;
1484     } else if ((s->nsector & 0xff) != 0 &&
1485         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1486          (s->nsector & (s->nsector - 1)) != 0)) {
1487         ide_abort_command(s);
1488     } else {
1489         s->mult_sectors = s->nsector & 0xff;
1490     }
1491 
1492     return true;
1493 }
1494 
1495 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1496 {
1497     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1498 
1499     if (!s->blk || !s->mult_sectors) {
1500         ide_abort_command(s);
1501         return true;
1502     }
1503 
1504     ide_cmd_lba48_transform(s, lba48);
1505     s->req_nb_sectors = s->mult_sectors;
1506     ide_sector_read(s);
1507     return false;
1508 }
1509 
1510 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1511 {
1512     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1513     int n;
1514 
1515     if (!s->blk || !s->mult_sectors) {
1516         ide_abort_command(s);
1517         return true;
1518     }
1519 
1520     ide_cmd_lba48_transform(s, lba48);
1521 
1522     s->req_nb_sectors = s->mult_sectors;
1523     n = MIN(s->nsector, s->req_nb_sectors);
1524 
1525     s->status = SEEK_STAT | READY_STAT;
1526     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1527 
1528     s->media_changed = 1;
1529 
1530     return false;
1531 }
1532 
1533 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1534 {
1535     bool lba48 = (cmd == WIN_READ_EXT);
1536 
1537     if (s->drive_kind == IDE_CD) {
1538         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1539         ide_abort_command(s);
1540         return true;
1541     }
1542 
1543     if (!s->blk) {
1544         ide_abort_command(s);
1545         return true;
1546     }
1547 
1548     ide_cmd_lba48_transform(s, lba48);
1549     s->req_nb_sectors = 1;
1550     ide_sector_read(s);
1551 
1552     return false;
1553 }
1554 
1555 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1556 {
1557     bool lba48 = (cmd == WIN_WRITE_EXT);
1558 
1559     if (!s->blk) {
1560         ide_abort_command(s);
1561         return true;
1562     }
1563 
1564     ide_cmd_lba48_transform(s, lba48);
1565 
1566     s->req_nb_sectors = 1;
1567     s->status = SEEK_STAT | READY_STAT;
1568     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1569 
1570     s->media_changed = 1;
1571 
1572     return false;
1573 }
1574 
1575 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1576 {
1577     bool lba48 = (cmd == WIN_READDMA_EXT);
1578 
1579     if (!s->blk) {
1580         ide_abort_command(s);
1581         return true;
1582     }
1583 
1584     ide_cmd_lba48_transform(s, lba48);
1585     ide_sector_start_dma(s, IDE_DMA_READ);
1586 
1587     return false;
1588 }
1589 
1590 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1591 {
1592     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1593 
1594     if (!s->blk) {
1595         ide_abort_command(s);
1596         return true;
1597     }
1598 
1599     ide_cmd_lba48_transform(s, lba48);
1600     ide_sector_start_dma(s, IDE_DMA_WRITE);
1601 
1602     s->media_changed = 1;
1603 
1604     return false;
1605 }
1606 
1607 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1608 {
1609     ide_flush_cache(s);
1610     return false;
1611 }
1612 
1613 static bool cmd_seek(IDEState *s, uint8_t cmd)
1614 {
1615     /* XXX: Check that seek is within bounds */
1616     return true;
1617 }
1618 
1619 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1620 {
1621     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1622 
1623     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1624     if (s->nb_sectors == 0) {
1625         ide_abort_command(s);
1626     } else {
1627         /*
1628          * Save the active drive parameters, which may have been
1629          * limited from their native counterparts by, e.g., INITIALIZE
1630          * DEVICE PARAMETERS or SET MAX ADDRESS.
1631          */
1632         const int aheads = s->heads;
1633         const int asectors = s->sectors;
1634 
1635         s->heads = s->drive_heads;
1636         s->sectors = s->drive_sectors;
1637 
1638         ide_cmd_lba48_transform(s, lba48);
1639         ide_set_sector(s, s->nb_sectors - 1);
1640 
1641         s->heads = aheads;
1642         s->sectors = asectors;
1643     }
1644 
1645     return true;
1646 }
1647 
1648 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1649 {
1650     s->nsector = 0xff; /* device active or idle */
1651     return true;
1652 }
1653 
1654 /* INITIALIZE DEVICE PARAMETERS */
1655 static bool cmd_specify(IDEState *s, uint8_t cmd)
1656 {
1657     if (s->blk && s->drive_kind != IDE_CD) {
1658         s->heads = (s->select & (ATA_DEV_HS)) + 1;
1659         s->sectors = s->nsector;
1660         ide_bus_set_irq(s->bus);
1661     } else {
1662         ide_abort_command(s);
1663     }
1664 
1665     return true;
1666 }
1667 
1668 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1669 {
1670     uint16_t *identify_data;
1671 
1672     if (!s->blk) {
1673         ide_abort_command(s);
1674         return true;
1675     }
1676 
1677     /* XXX: valid for CDROM? */
1678     switch (s->feature) {
1679     case 0x01: /* 8-bit I/O enable (CompactFlash) */
1680     case 0x81: /* 8-bit I/O disable (CompactFlash) */
1681         if (s->drive_kind != IDE_CFATA) {
1682             goto abort_cmd;
1683         }
1684         s->io8 = !(s->feature & 0x80);
1685         return true;
1686     case 0x02: /* write cache enable */
1687         blk_set_enable_write_cache(s->blk, true);
1688         identify_data = (uint16_t *)s->identify_data;
1689         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1690         return true;
1691     case 0x82: /* write cache disable */
1692         blk_set_enable_write_cache(s->blk, false);
1693         identify_data = (uint16_t *)s->identify_data;
1694         put_le16(identify_data + 85, (1 << 14) | 1);
1695         ide_flush_cache(s);
1696         return false;
1697     case 0xcc: /* reverting to power-on defaults enable */
1698         s->reset_reverts = true;
1699         return true;
1700     case 0x66: /* reverting to power-on defaults disable */
1701         s->reset_reverts = false;
1702         return true;
1703     case 0xaa: /* read look-ahead enable */
1704     case 0x55: /* read look-ahead disable */
1705     case 0x05: /* set advanced power management mode */
1706     case 0x85: /* disable advanced power management mode */
1707     case 0x69: /* NOP */
1708     case 0x67: /* NOP */
1709     case 0x96: /* NOP */
1710     case 0x9a: /* NOP */
1711     case 0x42: /* enable Automatic Acoustic Mode */
1712     case 0xc2: /* disable Automatic Acoustic Mode */
1713         return true;
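    /*
     * Set transfer mode.  IDENTIFY words 62 (singleword DMA), 63
     * (multiword DMA) and 88 (Ultra DMA) keep the supported modes in the
     * low byte; the extra bit set in the high byte below marks the mode
     * that is currently selected.
     */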
1714     case 0x03: /* set transfer mode */
1715         {
1716             uint8_t val = s->nsector & 0x07;
1717             identify_data = (uint16_t *)s->identify_data;
1718 
1719             switch (s->nsector >> 3) {
1720             case 0x00: /* pio default */
1721             case 0x01: /* pio mode */
1722                 put_le16(identify_data + 62, 0x07);
1723                 put_le16(identify_data + 63, 0x07);
1724                 put_le16(identify_data + 88, 0x3f);
1725                 break;
1726             case 0x02: /* single word dma mode */
1727                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1728                 put_le16(identify_data + 63, 0x07);
1729                 put_le16(identify_data + 88, 0x3f);
1730                 break;
1731             case 0x04: /* mdma mode */
1732                 put_le16(identify_data + 62, 0x07);
1733                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1734                 put_le16(identify_data + 88, 0x3f);
1735                 break;
1736             case 0x08: /* udma mode */
1737                 put_le16(identify_data + 62, 0x07);
1738                 put_le16(identify_data + 63, 0x07);
1739                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1740                 break;
1741             default:
1742                 goto abort_cmd;
1743             }
1744             return true;
1745         }
1746     }
1747 
1748 abort_cmd:
1749     ide_abort_command(s);
1750     return true;
1751 }
1752 
1753 
1754 /*** ATAPI commands ***/
1755 
1756 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1757 {
1758     ide_atapi_identify(s);
1759     s->status = READY_STAT | SEEK_STAT;
1760     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1761     ide_bus_set_irq(s->bus);
1762     return false;
1763 }
1764 
1765 /* EXECUTE DEVICE DIAGNOSTIC */
1766 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1767 {
1768     /*
1769      * Clear the device register per the ATA (v6) specification,
1770      * because ide_set_signature does not clear LBA or drive bits.
1771      */
1772     s->select = (ATA_DEV_ALWAYS_ON);
1773     ide_set_signature(s);
1774 
1775     if (s->drive_kind == IDE_CD) {
1776         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1777                         * devices to return a clear status register
1778                         * with READY_STAT *not* set. */
1779         s->error = 0x01;
1780     } else {
1781         s->status = READY_STAT | SEEK_STAT;
1782         /* The bits of the error register are not as usual for this command!
1783          * They are part of the regular output (which is why ERR_STAT isn't
1784          * set): Device 0 passed, Device 1 passed or not present. */
1785         s->error = 0x01;
1786         ide_bus_set_irq(s->bus);
1787     }
1788 
1789     return false;
1790 }
1791 
1792 static bool cmd_packet(IDEState *s, uint8_t cmd)
1793 {
1794     /* overlapping commands not supported */
1795     if (s->feature & 0x02) {
1796         ide_abort_command(s);
1797         return true;
1798     }
1799 
1800     s->status = READY_STAT | SEEK_STAT;
1801     s->atapi_dma = s->feature & 1;
1802     if (s->atapi_dma) {
1803         s->dma_cmd = IDE_DMA_ATAPI;
1804     }
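    /*
     * For ATAPI the sector count register doubles as the interrupt
     * reason register; C/D = 1, I/O = 0 tells the guest that the device
     * now expects the command packet.
     */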
1805     s->nsector = 1;
1806     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1807                        ide_atapi_cmd);
1808     return false;
1809 }
1810 
1811 
1812 /*** CF-ATA commands ***/
1813 
1814 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1815 {
1816     s->error = 0x09;    /* miscellaneous error */
1817     s->status = READY_STAT | SEEK_STAT;
1818     ide_bus_set_irq(s->bus);
1819 
1820     return false;
1821 }
1822 
1823 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1824 {
1825     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1826      * required for Windows 8 to work with AHCI */
1827 
1828     if (cmd == CFA_WEAR_LEVEL) {
1829         s->nsector = 0;
1830     }
1831 
1832     if (cmd == CFA_ERASE_SECTORS) {
1833         s->media_changed = 1;
1834     }
1835 
1836     return true;
1837 }
1838 
1839 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1840 {
1841     s->status = READY_STAT | SEEK_STAT;
1842 
1843     memset(s->io_buffer, 0, 0x200);
1844     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1845     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1846     s->io_buffer[0x02] = s->select;                 /* Head */
1847     s->io_buffer[0x03] = s->sector;                 /* Sector */
1848     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1849     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1850     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1851     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1852     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1853     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1854     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1855 
1856     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1857     ide_bus_set_irq(s->bus);
1858 
1859     return false;
1860 }
1861 
1862 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1863 {
1864     switch (s->feature) {
1865     case 0x02:  /* Inquiry Metadata Storage */
1866         ide_cfata_metadata_inquiry(s);
1867         break;
1868     case 0x03:  /* Read Metadata Storage */
1869         ide_cfata_metadata_read(s);
1870         break;
1871     case 0x04:  /* Write Metadata Storage */
1872         ide_cfata_metadata_write(s);
1873         break;
1874     default:
1875         ide_abort_command(s);
1876         return true;
1877     }
1878 
1879     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1880     s->status = 0x00; /* NOTE: READY is _not_ set */
1881     ide_bus_set_irq(s->bus);
1882 
1883     return false;
1884 }
1885 
1886 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1887 {
1888     switch (s->feature) {
1889     case 0x01:  /* sense temperature in device */
1890         s->nsector = 0x50;      /* +20 C */
1891         break;
1892     default:
1893         ide_abort_command(s);
1894         return true;
1895     }
1896 
1897     return true;
1898 }
1899 
1900 
1901 /*** SMART commands ***/
1902 
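/*
 * Every SMART subcommand requires the "key" values 0x4f/0xc2 in the
 * cylinder (LBA mid/high) registers.  SMART RETURN STATUS reports "no
 * thresholds exceeded" by echoing that key back and signals a failure by
 * returning 0x2c/0xf4 instead.
 */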
1903 static bool cmd_smart(IDEState *s, uint8_t cmd)
1904 {
1905     int n;
1906 
1907     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1908         goto abort_cmd;
1909     }
1910 
1911     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1912         goto abort_cmd;
1913     }
1914 
1915     switch (s->feature) {
1916     case SMART_DISABLE:
1917         s->smart_enabled = 0;
1918         return true;
1919 
1920     case SMART_ENABLE:
1921         s->smart_enabled = 1;
1922         return true;
1923 
1924     case SMART_ATTR_AUTOSAVE:
1925         switch (s->sector) {
1926         case 0x00:
1927             s->smart_autosave = 0;
1928             break;
1929         case 0xf1:
1930             s->smart_autosave = 1;
1931             break;
1932         default:
1933             goto abort_cmd;
1934         }
1935         return true;
1936 
1937     case SMART_STATUS:
1938         if (!s->smart_errors) {
1939             s->hcyl = 0xc2;
1940             s->lcyl = 0x4f;
1941         } else {
1942             s->hcyl = 0x2c;
1943             s->lcyl = 0xf4;
1944         }
1945         return true;
1946 
1947     case SMART_READ_THRESH:
1948         memset(s->io_buffer, 0, 0x200);
1949         s->io_buffer[0] = 0x01; /* smart struct version */
1950 
1951         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1952             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1953             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1954         }
1955 
1956         /* checksum: two's complement of the sum of bytes 0..510 */
1957         for (n = 0; n < 511; n++) {
1958             s->io_buffer[511] += s->io_buffer[n];
1959         }
1960         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1961 
1962         s->status = READY_STAT | SEEK_STAT;
1963         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1964         ide_bus_set_irq(s->bus);
1965         return false;
1966 
1967     case SMART_READ_DATA:
1968         memset(s->io_buffer, 0, 0x200);
1969         s->io_buffer[0] = 0x01; /* smart struct version */
1970 
1971         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1972             int i;
1973             for (i = 0; i < 11; i++) {
1974                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1975             }
1976         }
1977 
1978         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1979         if (s->smart_selftest_count == 0) {
1980             s->io_buffer[363] = 0;
1981         } else {
1982             s->io_buffer[363] =
1983                 s->smart_selftest_data[3 +
1984                            (s->smart_selftest_count - 1) *
1985                            24];
1986         }
1987         s->io_buffer[364] = 0x20;
1988         s->io_buffer[365] = 0x01;
1989         /* offline data collection capability: execute + self-test */
1990         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1991         s->io_buffer[368] = 0x03; /* smart capability (1) */
1992         s->io_buffer[369] = 0x00; /* smart capability (2) */
1993         s->io_buffer[370] = 0x01; /* error logging supported */
1994         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1995         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1996         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1997 
1998         for (n = 0; n < 511; n++) {
1999             s->io_buffer[511] += s->io_buffer[n];
2000         }
2001         s->io_buffer[511] = 0x100 - s->io_buffer[511];
2002 
2003         s->status = READY_STAT | SEEK_STAT;
2004         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2005         ide_bus_set_irq(s->bus);
2006         return false;
2007 
2008     case SMART_READ_LOG:
2009         switch (s->sector) {
2010         case 0x01: /* summary smart error log */
2011             memset(s->io_buffer, 0, 0x200);
2012             s->io_buffer[0] = 0x01;
2013             s->io_buffer[1] = 0x00; /* no error entries */
2014             s->io_buffer[452] = s->smart_errors & 0xff;
2015             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
2016 
2017             for (n = 0; n < 511; n++) {
2018                 s->io_buffer[511] += s->io_buffer[n];
2019             }
2020             s->io_buffer[511] = 0x100 - s->io_buffer[511];
2021             break;
2022         case 0x06: /* smart self test log */
2023             memset(s->io_buffer, 0, 0x200);
2024             s->io_buffer[0] = 0x01;
2025             if (s->smart_selftest_count == 0) {
2026                 s->io_buffer[508] = 0;
2027             } else {
2028                 s->io_buffer[508] = s->smart_selftest_count;
2029                 for (n = 2; n < 506; n++)  {
2030                     s->io_buffer[n] = s->smart_selftest_data[n];
2031                 }
2032             }
2033 
2034             for (n = 0; n < 511; n++) {
2035                 s->io_buffer[511] += s->io_buffer[n];
2036             }
2037             s->io_buffer[511] = 0x100 - s->io_buffer[511];
2038             break;
2039         default:
2040             goto abort_cmd;
2041         }
2042         s->status = READY_STAT | SEEK_STAT;
2043         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2044         ide_bus_set_irq(s->bus);
2045         return false;
2046 
2047     case SMART_EXECUTE_OFFLINE:
2048         switch (s->sector) {
2049         case 0: /* off-line routine */
2050         case 1: /* short self test */
2051         case 2: /* extended self test */
2052             s->smart_selftest_count++;
2053             if (s->smart_selftest_count > 21) {
2054                 s->smart_selftest_count = 1;
2055             }
2056             n = 2 + (s->smart_selftest_count - 1) * 24;
2057             s->smart_selftest_data[n] = s->sector;
2058             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
2059             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
2060             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
2061             break;
2062         default:
2063             goto abort_cmd;
2064         }
2065         return true;
2066     }
2067 
2068 abort_cmd:
2069     ide_abort_command(s);
2070     return true;
2071 }
2072 
2073 #define HD_OK (1u << IDE_HD)
2074 #define CD_OK (1u << IDE_CD)
2075 #define CFA_OK (1u << IDE_CFATA)
2076 #define HD_CFA_OK (HD_OK | CFA_OK)
2077 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2078 
2079 /* Set the Disk Seek Completed status bit during completion */
2080 /* Set the DSC (Drive Seek Complete) status bit during completion */
2081 
2082 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2083 static const struct {
2084     /* Returns true if the completion code should be run */
2085     bool (*handler)(IDEState *s, uint8_t cmd);
2086     int flags;
2087 } ide_cmd_table[0x100] = {
2088     /* NOP not implemented, mandatory for CD */
2089     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
2090     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2091     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2092     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC },
2093     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2094     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2095     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2096     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2097     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2098     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2099     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2100     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2101     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2102     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2103     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2104     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2105     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2106     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2107     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2108     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2109     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2110     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2111     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2112     [WIN_SPECIFY]                 = { cmd_specify, HD_CFA_OK | SET_DSC },
2113     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2114     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2115     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2116     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2117     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2118     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2119     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2120     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2121     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2122     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2123     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2124     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2125     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2126     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2127     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2128     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2129     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2130     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2131     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2132     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2133     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2134     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2135     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2136     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2137     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2138     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2139     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2140     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2141     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2142     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2143     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2144     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2145 };
2146 
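/*
 * The low bits of .flags form a drive-kind mask (HD_OK/CD_OK/CFA_OK)
 * that ide_cmd_permitted() checks against 1 << drive_kind; SET_DSC sits
 * in bit 8, well clear of the drive-kind bits.
 */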
2147 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2148 {
2149     return cmd < ARRAY_SIZE(ide_cmd_table)
2150         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2151 }
2152 
2153 void ide_bus_exec_cmd(IDEBus *bus, uint32_t val)
2154 {
2155     IDEState *s;
2156     bool complete;
2157 
2158     s = ide_bus_active_if(bus);
2159     trace_ide_bus_exec_cmd(bus, s, val);
2160 
2161     /* ignore commands to a non-existent slave */
2162     if (s != bus->ifs && !s->blk) {
2163         return;
2164     }
2165 
2166     /* Only RESET is allowed while BSY and/or DRQ are set,
2167      * and only to ATAPI devices. */
2168     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2169         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2170             return;
2171         }
2172     }
2173 
2174     if (!ide_cmd_permitted(s, val)) {
2175         ide_abort_command(s);
2176         ide_bus_set_irq(s->bus);
2177         return;
2178     }
2179 
2180     s->status = READY_STAT | BUSY_STAT;
2181     s->error = 0;
2182     s->io_buffer_offset = 0;
2183 
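    /*
     * A handler returns true if the command completed synchronously; only
     * then are the status register and IRQ finalized here.  Handlers that
     * start asynchronous work return false and finish via their own
     * completion callbacks.
     */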
2184     complete = ide_cmd_table[val].handler(s, val);
2185     if (complete) {
2186         s->status &= ~BUSY_STAT;
2187         assert(!!s->error == !!(s->status & ERR_STAT));
2188 
2189         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2190             s->status |= SEEK_STAT;
2191         }
2192 
2193         ide_cmd_done(s);
2194         ide_bus_set_irq(s->bus);
2195     }
2196 }
2197 
2198 /* IOport [R]ead [R]egisters */
2199 enum ATA_IOPORT_RR {
2200     ATA_IOPORT_RR_DATA = 0,
2201     ATA_IOPORT_RR_ERROR = 1,
2202     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2203     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2204     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2205     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2206     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2207     ATA_IOPORT_RR_STATUS = 7,
2208     ATA_IOPORT_RR_NUM_REGISTERS,
2209 };
2210 
2211 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2212     [ATA_IOPORT_RR_DATA] = "Data",
2213     [ATA_IOPORT_RR_ERROR] = "Error",
2214     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2215     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2216     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2217     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2218     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2219     [ATA_IOPORT_RR_STATUS] = "Status"
2220 };
2221 
2222 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2223 {
2224     IDEBus *bus = opaque;
2225     IDEState *s = ide_bus_active_if(bus);
2226     uint32_t reg_num;
2227     int ret, hob;
2228 
2229     reg_num = addr & 7;
2230     hob = bus->cmd & (IDE_CTRL_HOB);
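    /*
     * With the HOB bit set in the device control register, reads of the
     * LBA48 register pairs return the hob_* shadow copies (the previously
     * written high-order bytes) instead of the current values.
     */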
2231     switch (reg_num) {
2232     case ATA_IOPORT_RR_DATA:
2233         /*
2234          * The pre-GRUB Solaris x86 bootloader relies upon inb
2235          * consuming a word from the drive's sector buffer.
2236          */
2237         ret = ide_data_readw(bus, addr) & 0xff;
2238         break;
2239     case ATA_IOPORT_RR_ERROR:
2240         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2241             (s != bus->ifs && !s->blk)) {
2242             ret = 0;
2243         } else if (!hob) {
2244             ret = s->error;
2245         } else {
2246             ret = s->hob_feature;
2247         }
2248         break;
2249     case ATA_IOPORT_RR_SECTOR_COUNT:
2250         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2251             ret = 0;
2252         } else if (!hob) {
2253             ret = s->nsector & 0xff;
2254         } else {
2255             ret = s->hob_nsector;
2256         }
2257         break;
2258     case ATA_IOPORT_RR_SECTOR_NUMBER:
2259         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2260             ret = 0;
2261         } else if (!hob) {
2262             ret = s->sector;
2263         } else {
2264             ret = s->hob_sector;
2265         }
2266         break;
2267     case ATA_IOPORT_RR_CYLINDER_LOW:
2268         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2269             ret = 0;
2270         } else if (!hob) {
2271             ret = s->lcyl;
2272         } else {
2273             ret = s->hob_lcyl;
2274         }
2275         break;
2276     case ATA_IOPORT_RR_CYLINDER_HIGH:
2277         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2278             ret = 0;
2279         } else if (!hob) {
2280             ret = s->hcyl;
2281         } else {
2282             ret = s->hob_hcyl;
2283         }
2284         break;
2285     case ATA_IOPORT_RR_DEVICE_HEAD:
2286         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2287             ret = 0;
2288         } else {
2289             ret = s->select;
2290         }
2291         break;
2292     default:
2293     case ATA_IOPORT_RR_STATUS:
2294         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2295             (s != bus->ifs && !s->blk)) {
2296             ret = 0;
2297         } else {
2298             ret = s->status;
2299         }
2300         qemu_irq_lower(bus->irq);
2301         break;
2302     }
2303 
2304     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2305     return ret;
2306 }
2307 
2308 uint32_t ide_status_read(void *opaque, uint32_t addr)
2309 {
2310     IDEBus *bus = opaque;
2311     IDEState *s = ide_bus_active_if(bus);
2312     int ret;
2313 
2314     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2315         (s != bus->ifs && !s->blk)) {
2316         ret = 0;
2317     } else {
2318         ret = s->status;
2319     }
2320 
2321     trace_ide_status_read(addr, ret, bus, s);
2322     return ret;
2323 }
2324 
2325 static void ide_perform_srst(IDEState *s)
2326 {
2327     s->status |= BUSY_STAT;
2328 
2329     /* Halt PIO (via register state); the PIO BH remains scheduled. */
2330     ide_transfer_halt(s);
2331 
2332     /* Cancel DMA -- may drain block device and invoke callbacks */
2333     ide_cancel_dma_sync(s);
2334 
2335     /* Cancel PIO callback, reset registers/signature, etc. */
2336     ide_reset(s);
2337 
2338     /* perform diagnostic */
2339     cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
2340 }
2341 
2342 static void ide_bus_perform_srst(void *opaque)
2343 {
2344     IDEBus *bus = opaque;
2345     IDEState *s;
2346     int i;
2347 
2348     for (i = 0; i < 2; i++) {
2349         s = &bus->ifs[i];
2350         ide_perform_srst(s);
2351     }
2352 
2353     bus->cmd &= ~IDE_CTRL_RESET;
2354 }
2355 
2356 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2357 {
2358     IDEBus *bus = opaque;
2359     IDEState *s;
2360     int i;
2361 
2362     trace_ide_ctrl_write(addr, val, bus);
2363 
2364     /* Device0 and Device1 each have their own control register,
2365      * but QEMU models it as just one register in the controller. */
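    /*
     * A 0->1 transition of SRST marks both drives busy right away, but the
     * reset itself (which may drain in-flight DMA) is deferred to a bottom
     * half; the replay_ variant keeps the event deterministic under
     * record/replay.
     */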
2366     if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2367         for (i = 0; i < 2; i++) {
2368             s = &bus->ifs[i];
2369             s->status |= BUSY_STAT;
2370         }
2371         replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2372                                          ide_bus_perform_srst, bus);
2373     }
2374 
2375     bus->cmd = val;
2376 }
2377 
2378 /*
2379  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2380  * transferred from the device to the guest), false if it's a PIO in
2381  */
2382 static bool ide_is_pio_out(IDEState *s)
2383 {
2384     if (s->end_transfer_func == ide_sector_write ||
2385         s->end_transfer_func == ide_atapi_cmd) {
2386         return false;
2387     } else if (s->end_transfer_func == ide_sector_read ||
2388                s->end_transfer_func == ide_transfer_stop ||
2389                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2390                s->end_transfer_func == ide_dummy_transfer_stop) {
2391         return true;
2392     }
2393 
2394     abort();
2395 }
2396 
2397 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2398 {
2399     IDEBus *bus = opaque;
2400     IDEState *s = ide_bus_active_if(bus);
2401     uint8_t *p;
2402 
2403     trace_ide_data_writew(addr, val, bus, s);
2404 
2405     /* PIO data access is allowed only when the DRQ bit is set. The result of
2406      * a write during PIO out is indeterminate; just ignore it. */
2407     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2408         return;
2409     }
2410 
2411     p = s->data_ptr;
2412     if (s->io8) {
2413         if (p + 1 > s->data_end) {
2414             return;
2415         }
2416 
2417         *p++ = val;
2418     } else {
2419         if (p + 2 > s->data_end) {
2420             return;
2421         }
2422 
2423         *(uint16_t *)p = le16_to_cpu(val);
2424         p += 2;
2425     }
2426     s->data_ptr = p;
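    /*
     * Advance the window; once the guest has supplied the whole buffer,
     * clear DRQ and hand control to the per-command completion callback
     * (e.g. ide_sector_write).
     */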
2427     if (p >= s->data_end) {
2428         s->status &= ~DRQ_STAT;
2429         s->end_transfer_func(s);
2430     }
2431 }
2432 
2433 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2434 {
2435     IDEBus *bus = opaque;
2436     IDEState *s = ide_bus_active_if(bus);
2437     uint8_t *p;
2438     int ret;
2439 
2440     /* PIO data access is allowed only when the DRQ bit is set. The result of
2441      * a read during PIO in is indeterminate; return 0 and don't move forward. */
2442     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2443         return 0;
2444     }
2445 
2446     p = s->data_ptr;
2447     if (s->io8) {
2448         if (p + 1 > s->data_end) {
2449             return 0;
2450         }
2451 
2452         ret = *p++;
2453     } else {
2454         if (p + 2 > s->data_end) {
2455             return 0;
2456         }
2457 
2458         ret = cpu_to_le16(*(uint16_t *)p);
2459         p += 2;
2460     }
2461     s->data_ptr = p;
2462     if (p >= s->data_end) {
2463         s->status &= ~DRQ_STAT;
2464         s->end_transfer_func(s);
2465     }
2466 
2467     trace_ide_data_readw(addr, ret, bus, s);
2468     return ret;
2469 }
2470 
2471 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2472 {
2473     IDEBus *bus = opaque;
2474     IDEState *s = ide_bus_active_if(bus);
2475     uint8_t *p;
2476 
2477     trace_ide_data_writel(addr, val, bus, s);
2478 
2479     /* PIO data access is allowed only when the DRQ bit is set. The result of
2480      * a write during PIO out is indeterminate; just ignore it. */
2481     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2482         return;
2483     }
2484 
2485     p = s->data_ptr;
2486     if (p + 4 > s->data_end) {
2487         return;
2488     }
2489 
2490     *(uint32_t *)p = le32_to_cpu(val);
2491     p += 4;
2492     s->data_ptr = p;
2493     if (p >= s->data_end) {
2494         s->status &= ~DRQ_STAT;
2495         s->end_transfer_func(s);
2496     }
2497 }
2498 
2499 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2500 {
2501     IDEBus *bus = opaque;
2502     IDEState *s = ide_bus_active_if(bus);
2503     uint8_t *p;
2504     int ret;
2505 
2506     /* PIO data access is allowed only when the DRQ bit is set. The result of
2507      * a read during PIO in is indeterminate; return 0 and don't move forward. */
2508     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2509         ret = 0;
2510         goto out;
2511     }
2512 
2513     p = s->data_ptr;
2514     if (p + 4 > s->data_end) {
2515         return 0;
2516     }
2517 
2518     ret = cpu_to_le32(*(uint32_t *)p);
2519     p += 4;
2520     s->data_ptr = p;
2521     if (p >= s->data_end) {
2522         s->status &= ~DRQ_STAT;
2523         s->end_transfer_func(s);
2524     }
2525 
2526 out:
2527     trace_ide_data_readl(addr, ret, bus, s);
2528     return ret;
2529 }
2530 
2531 static void ide_dummy_transfer_stop(IDEState *s)
2532 {
2533     s->data_ptr = s->io_buffer;
2534     s->data_end = s->io_buffer;
2535     s->io_buffer[0] = 0xff;
2536     s->io_buffer[1] = 0xff;
2537     s->io_buffer[2] = 0xff;
2538     s->io_buffer[3] = 0xff;
2539 }
2540 
2541 void ide_bus_reset(IDEBus *bus)
2542 {
2543     /* pending async DMA - needs the IDEState before it is reset */
2544     if (bus->dma->aiocb) {
2545         trace_ide_bus_reset_aio();
2546         blk_aio_cancel(bus->dma->aiocb);
2547         bus->dma->aiocb = NULL;
2548     }
2549 
2550     bus->unit = 0;
2551     bus->cmd = 0;
2552     ide_reset(&bus->ifs[0]);
2553     ide_reset(&bus->ifs[1]);
2554     ide_clear_hob(bus);
2555 
2556     /* reset dma provider too */
2557     if (bus->dma->ops->reset) {
2558         bus->dma->ops->reset(bus->dma);
2559     }
2560 }
2561 
2562 static bool ide_cd_is_tray_open(void *opaque)
2563 {
2564     return ((IDEState *)opaque)->tray_open;
2565 }
2566 
2567 static bool ide_cd_is_medium_locked(void *opaque)
2568 {
2569     return ((IDEState *)opaque)->tray_locked;
2570 }
2571 
2572 static void ide_resize_cb(void *opaque)
2573 {
2574     IDEState *s = opaque;
2575     uint64_t nb_sectors;
2576 
2577     if (!s->identify_set) {
2578         return;
2579     }
2580 
2581     blk_get_geometry(s->blk, &nb_sectors);
2582     s->nb_sectors = nb_sectors;
2583 
2584     /* Update the identify data buffer. */
2585     if (s->drive_kind == IDE_CFATA) {
2586         ide_cfata_identify_size(s);
2587     } else {
2588         /* IDE_CD uses a different set of callbacks entirely. */
2589         assert(s->drive_kind != IDE_CD);
2590         ide_identify_size(s);
2591     }
2592 }
2593 
2594 static const BlockDevOps ide_cd_block_ops = {
2595     .change_media_cb = ide_cd_change_cb,
2596     .eject_request_cb = ide_cd_eject_request_cb,
2597     .is_tray_open = ide_cd_is_tray_open,
2598     .is_medium_locked = ide_cd_is_medium_locked,
2599 };
2600 
2601 static const BlockDevOps ide_hd_block_ops = {
2602     .resize_cb = ide_resize_cb,
2603 };
2604 
2605 int ide_init_drive(IDEState *s, IDEDevice *dev, IDEDriveKind kind, Error **errp)
2606 {
2607     uint64_t nb_sectors;
2608 
2609     s->blk = dev->conf.blk;
2610     s->drive_kind = kind;
2611 
2612     blk_get_geometry(s->blk, &nb_sectors);
2613     s->win2k_install_hack = dev->win2k_install_hack;
2614     s->cylinders = dev->conf.cyls;
2615     s->heads = s->drive_heads = dev->conf.heads;
2616     s->sectors = s->drive_sectors = dev->conf.secs;
2617     s->chs_trans = dev->chs_trans;
2618     s->nb_sectors = nb_sectors;
2619     s->wwn = dev->wwn;
2620     /* The SMART values should be preserved across power cycles,
2621      * but they aren't. */
2622     s->smart_enabled = 1;
2623     s->smart_autosave = 1;
2624     s->smart_errors = 0;
2625     s->smart_selftest_count = 0;
2626     if (kind == IDE_CD) {
2627         blk_set_dev_ops(s->blk, &ide_cd_block_ops, s);
2628     } else {
2629         if (!blk_is_inserted(s->blk)) {
2630             error_setg(errp, "Device needs media, but drive is empty");
2631             return -1;
2632         }
2633         if (!blk_is_writable(s->blk)) {
2634             error_setg(errp, "Can't use a read-only drive");
2635             return -1;
2636         }
2637         blk_set_dev_ops(s->blk, &ide_hd_block_ops, s);
2638     }
2639     if (dev->serial) {
2640         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), dev->serial);
2641     } else {
2642         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2643                  "QM%05d", s->drive_serial);
2644     }
2645     if (dev->model) {
2646         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), dev->model);
2647     } else {
2648         switch (kind) {
2649         case IDE_CD:
2650             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2651             break;
2652         case IDE_CFATA:
2653             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2654             break;
2655         default:
2656             strcpy(s->drive_model_str, "QEMU HARDDISK");
2657             break;
2658         }
2659     }
2660 
2661     if (dev->version) {
2662         pstrcpy(s->version, sizeof(s->version), dev->version);
2663     } else {
2664         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2665     }
2666 
2667     ide_reset(s);
2668     blk_iostatus_enable(s->blk);
2669     return 0;
2670 }
2671 
2672 static void ide_init1(IDEBus *bus, int unit)
2673 {
2674     static int drive_serial = 1;
2675     IDEState *s = &bus->ifs[unit];
2676 
2677     s->bus = bus;
2678     s->unit = unit;
2679     s->drive_serial = drive_serial++;
2680     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2681     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2682     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2683     memset(s->io_buffer, 0, s->io_buffer_total_len);
2684 
2685     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2686     memset(s->smart_selftest_data, 0, 512);
2687 
2688     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2689                                            ide_sector_write_timer_cb, s);
2690 }
2691 
2692 static int ide_nop_int(const IDEDMA *dma, bool is_write)
2693 {
2694     return 0;
2695 }
2696 
2697 static void ide_nop(const IDEDMA *dma)
2698 {
2699 }
2700 
2701 static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2702 {
2703     return 0;
2704 }
2705 
2706 static const IDEDMAOps ide_dma_nop_ops = {
2707     .prepare_buf    = ide_nop_int32,
2708     .restart_dma    = ide_nop,
2709     .rw_buf         = ide_nop_int,
2710 };
2711 
2712 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2713 {
2714     s->unit = s->bus->retry_unit;
2715     ide_set_sector(s, s->bus->retry_sector_num);
2716     s->nsector = s->bus->retry_nsector;
2717     s->bus->dma->ops->restart_dma(s->bus->dma);
2718     s->io_buffer_size = 0;
2719     s->dma_cmd = dma_cmd;
2720     ide_start_dma(s, ide_dma_cb);
2721 }
2722 
2723 static void ide_restart_bh(void *opaque)
2724 {
2725     IDEBus *bus = opaque;
2726     IDEState *s;
2727     bool is_read;
2728     int error_status;
2729 
2730     qemu_bh_delete(bus->bh);
2731     bus->bh = NULL;
2732 
2733     error_status = bus->error_status;
2734     if (bus->error_status == 0) {
2735         return;
2736     }
2737 
2738     s = ide_bus_active_if(bus);
2739     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2740 
2741     /* The error status must be cleared before resubmitting the request: The
2742      * request may fail again, and this case can only be distinguished if the
2743      * called function can set a new error status. */
2744     bus->error_status = 0;
2745 
2746     /* The HBA has generically asked to be kicked on retry */
2747     if (error_status & IDE_RETRY_HBA) {
2748         if (s->bus->dma->ops->restart) {
2749             s->bus->dma->ops->restart(s->bus->dma);
2750         }
2751     } else if (IS_IDE_RETRY_DMA(error_status)) {
2752         if (error_status & IDE_RETRY_TRIM) {
2753             ide_restart_dma(s, IDE_DMA_TRIM);
2754         } else {
2755             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2756         }
2757     } else if (IS_IDE_RETRY_PIO(error_status)) {
2758         if (is_read) {
2759             ide_sector_read(s);
2760         } else {
2761             ide_sector_write(s);
2762         }
2763     } else if (error_status & IDE_RETRY_FLUSH) {
2764         ide_flush_cache(s);
2765     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2766         assert(s->end_transfer_func == ide_atapi_cmd);
2767         ide_atapi_dma_restart(s);
2768     } else {
2769         abort();
2770     }
2771 }
2772 
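/*
 * VM change state handler: when the VM starts running again, schedule a
 * bottom half that re-submits any request whose failure is recorded in
 * bus->error_status (e.g. after a rerror/werror=stop policy stop).
 */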
2773 static void ide_restart_cb(void *opaque, bool running, RunState state)
2774 {
2775     IDEBus *bus = opaque;
2776 
2777     if (!running) {
2778         return;
    }
2779 
2780     if (!bus->bh) {
2781         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2782         qemu_bh_schedule(bus->bh);
2783     }
2784 }
2785 
2786 void ide_bus_register_restart_cb(IDEBus *bus)
2787 {
2788     if (bus->dma->ops->restart_dma) {
2789         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2790     }
2791 }
2792 
2793 static IDEDMA ide_dma_nop = {
2794     .ops = &ide_dma_nop_ops,
2795     .aiocb = NULL,
2796 };
2797 
2798 void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out)
2799 {
2800     int i;
2801 
2802     for (i = 0; i < 2; i++) {
2803         ide_init1(bus, i);
2804         ide_reset(&bus->ifs[i]);
2805     }
2806     bus->irq = irq_out;
2807     bus->dma = &ide_dma_nop;
2808 }
2809 
2810 void ide_bus_set_irq(IDEBus *bus)
2811 {
2812     if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) {
2813         qemu_irq_raise(bus->irq);
2814     }
2815 }
2816 
2817 void ide_exit(IDEState *s)
2818 {
2819     timer_free(s->sector_write_timer);
2820     qemu_vfree(s->smart_selftest_data);
2821     qemu_vfree(s->io_buffer);
2822 }
2823 
2824 static bool is_identify_set(void *opaque, int version_id)
2825 {
2826     IDEState *s = opaque;
2827 
2828     return s->identify_set != 0;
2829 }
2830 
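/*
 * Function pointers cannot be migrated directly, so the PIO state
 * subsection saves an index into this table (pre_save) and converts it
 * back into end_transfer_func on load (post_load).
 */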
2831 static EndTransferFunc* transfer_end_table[] = {
2832         ide_sector_read,
2833         ide_sector_write,
2834         ide_transfer_stop,
2835         ide_atapi_cmd_reply_end,
2836         ide_atapi_cmd,
2837         ide_dummy_transfer_stop,
2838 };
2839 
2840 static int transfer_end_table_idx(EndTransferFunc *fn)
2841 {
2842     int i;
2843 
2844     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++) {
2845         if (transfer_end_table[i] == fn) {
2846             return i;
        }
    }
2847 
2848     return -1;
2849 }
2850 
2851 static int ide_drive_post_load(void *opaque, int version_id)
2852 {
2853     IDEState *s = opaque;
2854 
2855     if (s->blk && s->identify_set) {
2856         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2857     }
2858     return 0;
2859 }
2860 
2861 static int ide_drive_pio_post_load(void *opaque, int version_id)
2862 {
2863     IDEState *s = opaque;
2864 
2865     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2866         return -EINVAL;
2867     }
2868     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2869     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2870     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2871     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2872 
2873     return 0;
2874 }
2875 
2876 static int ide_drive_pio_pre_save(void *opaque)
2877 {
2878     IDEState *s = opaque;
2879     int idx;
2880 
2881     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2882     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2883 
2884     idx = transfer_end_table_idx(s->end_transfer_func);
2885     if (idx == -1) {
2886         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2887                         __func__);
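        /* fall back to ide_transfer_stop (index 2 in transfer_end_table) */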
2888         s->end_transfer_fn_idx = 2;
2889     } else {
2890         s->end_transfer_fn_idx = idx;
2891     }
2892 
2893     return 0;
2894 }
2895 
2896 static bool ide_drive_pio_state_needed(void *opaque)
2897 {
2898     IDEState *s = opaque;
2899 
2900     return ((s->status & DRQ_STAT) != 0)
2901         || (s->bus->error_status & IDE_RETRY_PIO);
2902 }
2903 
2904 static bool ide_tray_state_needed(void *opaque)
2905 {
2906     IDEState *s = opaque;
2907 
2908     return s->tray_open || s->tray_locked;
2909 }
2910 
2911 static bool ide_atapi_gesn_needed(void *opaque)
2912 {
2913     IDEState *s = opaque;
2914 
2915     return s->events.new_media || s->events.eject_request;
2916 }
2917 
2918 static bool ide_error_needed(void *opaque)
2919 {
2920     IDEBus *bus = opaque;
2921 
2922     return (bus->error_status != 0);
2923 }
2924 
2925 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2926 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2927     .name = "ide_drive/atapi/gesn_state",
2928     .version_id = 1,
2929     .minimum_version_id = 1,
2930     .needed = ide_atapi_gesn_needed,
2931     .fields = (const VMStateField[]) {
2932         VMSTATE_BOOL(events.new_media, IDEState),
2933         VMSTATE_BOOL(events.eject_request, IDEState),
2934         VMSTATE_END_OF_LIST()
2935     }
2936 };
2937 
2938 static const VMStateDescription vmstate_ide_tray_state = {
2939     .name = "ide_drive/tray_state",
2940     .version_id = 1,
2941     .minimum_version_id = 1,
2942     .needed = ide_tray_state_needed,
2943     .fields = (const VMStateField[]) {
2944         VMSTATE_BOOL(tray_open, IDEState),
2945         VMSTATE_BOOL(tray_locked, IDEState),
2946         VMSTATE_END_OF_LIST()
2947     }
2948 };
2949 
2950 static const VMStateDescription vmstate_ide_drive_pio_state = {
2951     .name = "ide_drive/pio_state",
2952     .version_id = 1,
2953     .minimum_version_id = 1,
2954     .pre_save = ide_drive_pio_pre_save,
2955     .post_load = ide_drive_pio_post_load,
2956     .needed = ide_drive_pio_state_needed,
2957     .fields = (const VMStateField[]) {
2958         VMSTATE_INT32(req_nb_sectors, IDEState),
2959         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2960                              vmstate_info_uint8, uint8_t),
2961         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2962         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2963         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2964         VMSTATE_INT32(elementary_transfer_size, IDEState),
2965         VMSTATE_INT32(packet_transfer_size, IDEState),
2966         VMSTATE_END_OF_LIST()
2967     }
2968 };
2969 
2970 const VMStateDescription vmstate_ide_drive = {
2971     .name = "ide_drive",
2972     .version_id = 3,
2973     .minimum_version_id = 0,
2974     .post_load = ide_drive_post_load,
2975     .fields = (const VMStateField[]) {
2976         VMSTATE_INT32(mult_sectors, IDEState),
2977         VMSTATE_INT32(identify_set, IDEState),
2978         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2979         VMSTATE_UINT8(feature, IDEState),
2980         VMSTATE_UINT8(error, IDEState),
2981         VMSTATE_UINT32(nsector, IDEState),
2982         VMSTATE_UINT8(sector, IDEState),
2983         VMSTATE_UINT8(lcyl, IDEState),
2984         VMSTATE_UINT8(hcyl, IDEState),
2985         VMSTATE_UINT8(hob_feature, IDEState),
2986         VMSTATE_UINT8(hob_sector, IDEState),
2987         VMSTATE_UINT8(hob_nsector, IDEState),
2988         VMSTATE_UINT8(hob_lcyl, IDEState),
2989         VMSTATE_UINT8(hob_hcyl, IDEState),
2990         VMSTATE_UINT8(select, IDEState),
2991         VMSTATE_UINT8(status, IDEState),
2992         VMSTATE_UINT8(lba48, IDEState),
2993         VMSTATE_UINT8(sense_key, IDEState),
2994         VMSTATE_UINT8(asc, IDEState),
2995         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2996         VMSTATE_END_OF_LIST()
2997     },
2998     .subsections = (const VMStateDescription * const []) {
2999         &vmstate_ide_drive_pio_state,
3000         &vmstate_ide_tray_state,
3001         &vmstate_ide_atapi_gesn_state,
3002         NULL
3003     }
3004 };
3005 
3006 static const VMStateDescription vmstate_ide_error_status = {
3007     .name = "ide_bus/error",
3008     .version_id = 2,
3009     .minimum_version_id = 1,
3010     .needed = ide_error_needed,
3011     .fields = (const VMStateField[]) {
3012         VMSTATE_INT32(error_status, IDEBus),
3013         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
3014         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
3015         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
3016         VMSTATE_END_OF_LIST()
3017     }
3018 };
3019 
3020 const VMStateDescription vmstate_ide_bus = {
3021     .name = "ide_bus",
3022     .version_id = 1,
3023     .minimum_version_id = 1,
3024     .fields = (const VMStateField[]) {
3025         VMSTATE_UINT8(cmd, IDEBus),
3026         VMSTATE_UINT8(unit, IDEBus),
3027         VMSTATE_END_OF_LIST()
3028     },
3029     .subsections = (const VMStateDescription * const []) {
3030         &vmstate_ide_error_status,
3031         NULL
3032     }
3033 };
3034 
3035 void ide_drive_get(DriveInfo **hd, int n)
3036 {
3037     int i;
3038 
3039     for (i = 0; i < n; i++) {
3040         hd[i] = drive_get_by_index(IF_IDE, i);
3041     }
3042 }
3043