xref: /openbmc/qemu/hw/ide/core.c (revision 0e39bb022b5fa8c11964968885f3263c02ce42b0)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 
40 /* These values were based on a Seagate ST3500418AS but have been modified
41    to make more sense in QEMU */
42 static const int smart_attributes[][12] = {
43     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
44     /* raw read error rate */
45     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
46     /* spin up */
47     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
48     /* start stop count */
49     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
50     /* remapped sectors */
51     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
52     /* power on hours */
53     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54     /* power cycle count */
55     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* airflow-temperature-celsius */
57     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 };
59 
60 static void ide_dummy_transfer_stop(IDEState *s);
61 
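/*
 * ATA string fields (serial number, firmware revision, model) store two
 * ASCII characters per 16-bit word with the first character in the upper
 * byte, so within the little-endian identify buffer the two bytes of every
 * pair are swapped -- hence the "i^1" index below. Unused positions are
 * padded with spaces.
 */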
62 static void padstr(char *str, const char *src, int len)
63 {
64     int i, v;
65     for(i = 0; i < len; i++) {
66         if (*src)
67             v = *src++;
68         else
69             v = ' ';
70         str[i^1] = v;
71     }
72 }
73 
74 static void put_le16(uint16_t *p, unsigned int v)
75 {
76     *p = cpu_to_le16(v);
77 }
78 
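/*
 * Refresh the capacity words of the IDENTIFY data: words 60-61 hold the
 * 28-bit LBA sector count, words 100-103 the 48-bit (LBA48) sector count.
 */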
79 static void ide_identify_size(IDEState *s)
80 {
81     uint16_t *p = (uint16_t *)s->identify_data;
82     put_le16(p + 60, s->nb_sectors);
83     put_le16(p + 61, s->nb_sectors >> 16);
84     put_le16(p + 100, s->nb_sectors);
85     put_le16(p + 101, s->nb_sectors >> 16);
86     put_le16(p + 102, s->nb_sectors >> 32);
87     put_le16(p + 103, s->nb_sectors >> 48);
88 }
89 
90 static void ide_identify(IDEState *s)
91 {
92     uint16_t *p;
93     unsigned int oldsize;
94     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
95 
96     p = (uint16_t *)s->identify_data;
97     if (s->identify_set) {
98         goto fill_buffer;
99     }
100     memset(p, 0, sizeof(s->identify_data));
101 
102     put_le16(p + 0, 0x0040);
103     put_le16(p + 1, s->cylinders);
104     put_le16(p + 3, s->heads);
105     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
106     put_le16(p + 5, 512); /* XXX: retired, remove ? */
107     put_le16(p + 6, s->sectors);
108     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
109     put_le16(p + 20, 3); /* XXX: retired, remove ? */
110     put_le16(p + 21, 512); /* cache size in sectors */
111     put_le16(p + 22, 4); /* ecc bytes */
112     padstr((char *)(p + 23), s->version, 8); /* firmware version */
113     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
114 #if MAX_MULT_SECTORS > 1
115     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
116 #endif
117     put_le16(p + 48, 1); /* dword I/O */
118     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
119     put_le16(p + 51, 0x200); /* PIO transfer cycle */
120     put_le16(p + 52, 0x200); /* DMA transfer cycle */
121     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
122     put_le16(p + 54, s->cylinders);
123     put_le16(p + 55, s->heads);
124     put_le16(p + 56, s->sectors);
125     oldsize = s->cylinders * s->heads * s->sectors;
126     put_le16(p + 57, oldsize);
127     put_le16(p + 58, oldsize >> 16);
128     if (s->mult_sectors)
129         put_le16(p + 59, 0x100 | s->mult_sectors);
130     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
131     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
132     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
133     put_le16(p + 63, 0x07); /* mdma0-2 supported */
134     put_le16(p + 64, 0x03); /* pio3-4 supported */
135     put_le16(p + 65, 120);
136     put_le16(p + 66, 120);
137     put_le16(p + 67, 120);
138     put_le16(p + 68, 120);
139     if (dev && dev->conf.discard_granularity) {
140         put_le16(p + 69, (1 << 14)); /* deterministic read after TRIM */
141     }
142 
143     if (s->ncq_queues) {
144         put_le16(p + 75, s->ncq_queues - 1);
145         /* NCQ supported */
146         put_le16(p + 76, (1 << 8));
147     }
148 
149     put_le16(p + 80, 0xf0); /* ATA/ATAPI-4 to ATA/ATAPI-7 supported */
150     put_le16(p + 81, 0x16); /* conforms to ata5 */
151     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
152     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
153     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
154     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
155     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
156     if (s->wwn) {
157         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
158     } else {
159         put_le16(p + 84, (1 << 14) | 0);
160     }
161     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
162     if (blk_enable_write_cache(s->blk)) {
163         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
164     } else {
165         put_le16(p + 85, (1 << 14) | 1);
166     }
167     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
168     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
169     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
170     if (s->wwn) {
171         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
172     } else {
173         put_le16(p + 87, (1 << 14) | 0);
174     }
175     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
176     put_le16(p + 93, 1 | (1 << 14) | 0x2000); /* hardware reset result */
177     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
178     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
179     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
180     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 
182     if (dev && dev->conf.physical_block_size)
183         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
184     if (s->wwn) {
185         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
186         put_le16(p + 108, s->wwn >> 48);
187         put_le16(p + 109, s->wwn >> 32);
188         put_le16(p + 110, s->wwn >> 16);
189         put_le16(p + 111, s->wwn);
190     }
191     if (dev && dev->conf.discard_granularity) {
192         put_le16(p + 169, 1); /* TRIM support */
193     }
194 
195     ide_identify_size(s);
196     s->identify_set = 1;
197 
198 fill_buffer:
199     memcpy(s->io_buffer, p, sizeof(s->identify_data));
200 }
201 
202 static void ide_atapi_identify(IDEState *s)
203 {
204     uint16_t *p;
205 
206     p = (uint16_t *)s->identify_data;
207     if (s->identify_set) {
208         goto fill_buffer;
209     }
210     memset(p, 0, sizeof(s->identify_data));
211 
212     /* Removable CDROM, 50us response, 12 byte packets */
213     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
214     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
215     put_le16(p + 20, 3); /* buffer type */
216     put_le16(p + 21, 512); /* cache size in sectors */
217     put_le16(p + 22, 4); /* ecc bytes */
218     padstr((char *)(p + 23), s->version, 8); /* firmware version */
219     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
220     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
221 #ifdef USE_DMA_CDROM
222     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
223     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
224     put_le16(p + 62, 7);  /* single word dma0-2 supported */
225     put_le16(p + 63, 7);  /* mdma0-2 supported */
226 #else
227     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
228     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
229     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
230 #endif
231     put_le16(p + 64, 3); /* pio3-4 supported */
232     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
233     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
234     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
235     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 
237     put_le16(p + 71, 30); /* in ns */
238     put_le16(p + 72, 30); /* in ns */
239 
240     if (s->ncq_queues) {
241         put_le16(p + 75, s->ncq_queues - 1);
242         /* NCQ supported */
243         put_le16(p + 76, (1 << 8));
244     }
245 
246     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
247     if (s->wwn) {
248         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
249         put_le16(p + 87, (1 << 8)); /* WWN enabled */
250     }
251 
252 #ifdef USE_DMA_CDROM
253     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
254 #endif
255 
256     if (s->wwn) {
257         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
258         put_le16(p + 108, s->wwn >> 48);
259         put_le16(p + 109, s->wwn >> 32);
260         put_le16(p + 110, s->wwn >> 16);
261         put_le16(p + 111, s->wwn);
262     }
263 
264     s->identify_set = 1;
265 
266 fill_buffer:
267     memcpy(s->io_buffer, p, sizeof(s->identify_data));
268 }
269 
270 static void ide_cfata_identify_size(IDEState *s)
271 {
272     uint16_t *p = (uint16_t *)s->identify_data;
273     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
274     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
275     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
276     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
277 }
278 
279 static void ide_cfata_identify(IDEState *s)
280 {
281     uint16_t *p;
282     uint32_t cur_sec;
283 
284     p = (uint16_t *)s->identify_data;
285     if (s->identify_set) {
286         goto fill_buffer;
287     }
288     memset(p, 0, sizeof(s->identify_data));
289 
290     cur_sec = s->cylinders * s->heads * s->sectors;
291 
292     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
293     put_le16(p + 1, s->cylinders);		/* Default cylinders */
294     put_le16(p + 3, s->heads);			/* Default heads */
295     put_le16(p + 6, s->sectors);		/* Default sectors per track */
296     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
297     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
298     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
299     put_le16(p + 22, 0x0004);			/* ECC bytes */
300     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
301     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
302 #if MAX_MULT_SECTORS > 1
303     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
304 #else
305     put_le16(p + 47, 0x0000);
306 #endif
307     put_le16(p + 49, 0x0f00);			/* Capabilities */
308     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
309     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
310     put_le16(p + 53, 0x0003);			/* Translation params valid */
311     put_le16(p + 54, s->cylinders);		/* Current cylinders */
312     put_le16(p + 55, s->heads);			/* Current heads */
313     put_le16(p + 56, s->sectors);		/* Current sectors */
314     put_le16(p + 57, cur_sec);			/* Current capacity */
315     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
316     if (s->mult_sectors)			/* Multiple sector setting */
317         put_le16(p + 59, 0x100 | s->mult_sectors);
318     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
319     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
320     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
321     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
322     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
323     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
324     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
325     put_le16(p + 82, 0x400c);			/* Command Set supported */
326     put_le16(p + 83, 0x7068);			/* Command Set supported */
327     put_le16(p + 84, 0x4000);			/* Features supported */
328     put_le16(p + 85, 0x000c);			/* Command Set enabled */
329     put_le16(p + 86, 0x7044);			/* Command Set enabled */
330     put_le16(p + 87, 0x4000);			/* Features enabled */
331     put_le16(p + 91, 0x4060);			/* Current APM level */
332     put_le16(p + 129, 0x0002);			/* Current features option */
333     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
334     put_le16(p + 131, 0x0001);			/* Initial power mode */
335     put_le16(p + 132, 0x0000);			/* User signature */
336     put_le16(p + 160, 0x8100);			/* Power requirement */
337     put_le16(p + 161, 0x8001);			/* CF command set */
338 
339     ide_cfata_identify_size(s);
340     s->identify_set = 1;
341 
342 fill_buffer:
343     memcpy(s->io_buffer, p, sizeof(s->identify_data));
344 }
345 
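/*
 * Place the ATA device signature in the task file registers: sector count
 * and sector number are set to 1, and the cylinder registers identify the
 * device type (0xEB14 for ATAPI, 0x0000 for an ATA disk, 0xFFFF when no
 * block backend is attached).
 */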
346 static void ide_set_signature(IDEState *s)
347 {
348     s->select &= 0xf0; /* clear head */
349     /* put signature */
350     s->nsector = 1;
351     s->sector = 1;
352     if (s->drive_kind == IDE_CD) {
353         s->lcyl = 0x14;
354         s->hcyl = 0xeb;
355     } else if (s->blk) {
356         s->lcyl = 0;
357         s->hcyl = 0;
358     } else {
359         s->lcyl = 0xff;
360         s->hcyl = 0xff;
361     }
362 }
363 
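/*
 * State for an emulated DSM TRIM operation: the guest-supplied range
 * entries are walked one at a time and turned into individual discard
 * requests; i/j track the current entry within the current iovec.
 */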
364 typedef struct TrimAIOCB {
365     BlockAIOCB common;
366     BlockBackend *blk;
367     QEMUBH *bh;
368     int ret;
369     QEMUIOVector *qiov;
370     BlockAIOCB *aiocb;
371     int i, j;
372 } TrimAIOCB;
373 
374 static void trim_aio_cancel(BlockAIOCB *acb)
375 {
376     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 
378     /* Exit the loop so that ide_issue_trim_cb will not continue. */
379     iocb->j = iocb->qiov->niov - 1;
380     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 
382     iocb->ret = -ECANCELED;
383 
384     if (iocb->aiocb) {
385         blk_aio_cancel_async(iocb->aiocb);
386         iocb->aiocb = NULL;
387     }
388 }
389 
390 static const AIOCBInfo trim_aiocb_info = {
391     .aiocb_size         = sizeof(TrimAIOCB),
392     .cancel_async       = trim_aio_cancel,
393 };
394 
395 static void ide_trim_bh_cb(void *opaque)
396 {
397     TrimAIOCB *iocb = opaque;
398 
399     iocb->common.cb(iocb->common.opaque, iocb->ret);
400 
401     qemu_bh_delete(iocb->bh);
402     iocb->bh = NULL;
403     qemu_aio_unref(iocb);
404 }
405 
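/*
 * Completion callback that also drives the TRIM state machine: each 8-byte
 * entry in the payload holds a 48-bit starting LBA in the low six bytes and
 * a 16-bit sector count in the upper two bytes. Zero-length entries are
 * skipped, one discard is submitted per entry, and once all iovecs are
 * consumed the bottom half delivers the final status.
 */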
406 static void ide_issue_trim_cb(void *opaque, int ret)
407 {
408     TrimAIOCB *iocb = opaque;
409     if (ret >= 0) {
410         while (iocb->j < iocb->qiov->niov) {
411             int j = iocb->j;
412             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
413                 int i = iocb->i;
414                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 
416                 /* 6-byte LBA + 2-byte range per entry */
417                 uint64_t entry = le64_to_cpu(buffer[i]);
418                 uint64_t sector = entry & 0x0000ffffffffffffULL;
419                 uint16_t count = entry >> 48;
420 
421                 if (count == 0) {
422                     continue;
423                 }
424 
425                 /* Got an entry! Submit and exit.  */
426                 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
427                                                sector << BDRV_SECTOR_BITS,
428                                                count << BDRV_SECTOR_BITS,
429                                                ide_issue_trim_cb, opaque);
430                 return;
431             }
432 
433             iocb->j++;
434             iocb->i = -1;
435         }
436     } else {
437         iocb->ret = ret;
438     }
439 
440     iocb->aiocb = NULL;
441     if (iocb->bh) {
442         qemu_bh_schedule(iocb->bh);
443     }
444 }
445 
446 BlockAIOCB *ide_issue_trim(
447         int64_t offset, QEMUIOVector *qiov,
448         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
449 {
450     BlockBackend *blk = opaque;
451     TrimAIOCB *iocb;
452 
453     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
454     iocb->blk = blk;
455     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
456     iocb->ret = 0;
457     iocb->qiov = qiov;
458     iocb->i = -1;
459     iocb->j = 0;
460     ide_issue_trim_cb(iocb, 0);
461     return &iocb->common;
462 }
463 
464 void ide_abort_command(IDEState *s)
465 {
466     ide_transfer_stop(s);
467     s->status = READY_STAT | ERR_STAT;
468     s->error = ABRT_ERR;
469 }
470 
471 static void ide_set_retry(IDEState *s)
472 {
473     s->bus->retry_unit = s->unit;
474     s->bus->retry_sector_num = ide_get_sector(s);
475     s->bus->retry_nsector = s->nsector;
476 }
477 
478 static void ide_clear_retry(IDEState *s)
479 {
480     s->bus->retry_unit = -1;
481     s->bus->retry_sector_num = 0;
482     s->bus->retry_nsector = 0;
483 }
484 
485 /* prepare data transfer and tell what to do after */
486 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
487                         EndTransferFunc *end_transfer_func)
488 {
489     s->end_transfer_func = end_transfer_func;
490     s->data_ptr = buf;
491     s->data_end = buf + size;
492     ide_set_retry(s);
493     if (!(s->status & ERR_STAT)) {
494         s->status |= DRQ_STAT;
495     }
496     if (s->bus->dma->ops->start_transfer) {
497         s->bus->dma->ops->start_transfer(s->bus->dma);
498     }
499 }
500 
501 static void ide_cmd_done(IDEState *s)
502 {
503     if (s->bus->dma->ops->cmd_done) {
504         s->bus->dma->ops->cmd_done(s->bus->dma);
505     }
506 }
507 
508 static void ide_transfer_halt(IDEState *s,
509                               void(*end_transfer_func)(IDEState *),
510                               bool notify)
511 {
512     s->end_transfer_func = end_transfer_func;
513     s->data_ptr = s->io_buffer;
514     s->data_end = s->io_buffer;
515     s->status &= ~DRQ_STAT;
516     if (notify) {
517         ide_cmd_done(s);
518     }
519 }
520 
521 void ide_transfer_stop(IDEState *s)
522 {
523     ide_transfer_halt(s, ide_transfer_stop, true);
524 }
525 
526 static void ide_transfer_cancel(IDEState *s)
527 {
528     ide_transfer_halt(s, ide_transfer_cancel, false);
529 }
530 
531 int64_t ide_get_sector(IDEState *s)
532 {
533     int64_t sector_num;
534     if (s->select & 0x40) {
535         /* lba */
536 	if (!s->lba48) {
537 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
538 		(s->lcyl << 8) | s->sector;
539 	} else {
540 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
541 		((int64_t) s->hob_lcyl << 32) |
542 		((int64_t) s->hob_sector << 24) |
543 		((int64_t) s->hcyl << 16) |
544 		((int64_t) s->lcyl << 8) | s->sector;
545 	}
546     } else {
547         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
548             (s->select & 0x0f) * s->sectors + (s->sector - 1);
549     }
550     return sector_num;
551 }
552 
553 void ide_set_sector(IDEState *s, int64_t sector_num)
554 {
555     unsigned int cyl, r;
556     if (s->select & 0x40) {
557 	if (!s->lba48) {
558             s->select = (s->select & 0xf0) | (sector_num >> 24);
559             s->hcyl = (sector_num >> 16);
560             s->lcyl = (sector_num >> 8);
561             s->sector = (sector_num);
562 	} else {
563 	    s->sector = sector_num;
564 	    s->lcyl = sector_num >> 8;
565 	    s->hcyl = sector_num >> 16;
566 	    s->hob_sector = sector_num >> 24;
567 	    s->hob_lcyl = sector_num >> 32;
568 	    s->hob_hcyl = sector_num >> 40;
569 	}
570     } else {
571         cyl = sector_num / (s->heads * s->sectors);
572         r = sector_num % (s->heads * s->sectors);
573         s->hcyl = cyl >> 8;
574         s->lcyl = cyl;
575         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
576         s->sector = (r % s->sectors) + 1;
577     }
578 }
579 
580 static void ide_rw_error(IDEState *s) {
581     ide_abort_command(s);
582     ide_set_irq(s->bus);
583 }
584 
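/* Check that [sector, sector + nb_sectors) fits within the backing device. */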
585 static bool ide_sect_range_ok(IDEState *s,
586                               uint64_t sector, uint64_t nb_sectors)
587 {
588     uint64_t total_sectors;
589 
590     blk_get_geometry(s->blk, &total_sectors);
591     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
592         return false;
593     }
594     return true;
595 }
596 
597 static void ide_buffered_readv_cb(void *opaque, int ret)
598 {
599     IDEBufferedRequest *req = opaque;
600     if (!req->orphaned) {
601         if (!ret) {
602             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
603                                 req->original_qiov->size);
604         }
605         req->original_cb(req->original_opaque, ret);
606     }
607     QLIST_REMOVE(req, list);
608     qemu_vfree(req->iov.iov_base);
609     g_free(req);
610 }
611 
612 #define MAX_BUFFERED_REQS 16
613 
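/*
 * Read into a bounce buffer owned by the request instead of directly into
 * the caller's iovec. If the request is orphaned by ide_cancel_dma_sync(),
 * the completion callback frees the bounce buffer without ever touching the
 * original iovec, so the request can be abandoned safely before the read
 * completes.
 */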
614 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
615                                QEMUIOVector *iov, int nb_sectors,
616                                BlockCompletionFunc *cb, void *opaque)
617 {
618     BlockAIOCB *aioreq;
619     IDEBufferedRequest *req;
620     int c = 0;
621 
622     QLIST_FOREACH(req, &s->buffered_requests, list) {
623         c++;
624     }
625     if (c > MAX_BUFFERED_REQS) {
626         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
627     }
628 
629     req = g_new0(IDEBufferedRequest, 1);
630     req->original_qiov = iov;
631     req->original_cb = cb;
632     req->original_opaque = opaque;
633     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
634     req->iov.iov_len = iov->size;
635     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
636 
637     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
638                             &req->qiov, 0, ide_buffered_readv_cb, req);
639 
640     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
641     return aioreq;
642 }
643 
644 /**
645  * Cancel all pending DMA requests.
646  * Any buffered DMA requests are instantly canceled,
647  * but any pending unbuffered DMA requests must be waited on.
648  */
649 void ide_cancel_dma_sync(IDEState *s)
650 {
651     IDEBufferedRequest *req;
652 
653     /* First invoke the callbacks of all buffered requests
654      * and flag those requests as orphaned. Ideally there
655      * are no unbuffered (scatter-gather DMA or write)
656      * requests pending, so we can avoid draining. */
657     QLIST_FOREACH(req, &s->buffered_requests, list) {
658         if (!req->orphaned) {
659 #ifdef DEBUG_IDE
660             printf("%s: invoking cb %p of buffered request %p with"
661                    " -ECANCELED\n", __func__, req->original_cb, req);
662 #endif
663             req->original_cb(req->original_opaque, -ECANCELED);
664         }
665         req->orphaned = true;
666     }
667 
668     /*
669      * We can't cancel scatter-gather DMA in the middle of the
670      * operation, or a partial (not full) DMA transfer would reach
671      * the storage, so we wait for completion instead (we behave
672      * as if the DMA had completed by the time the guest tried
673      * to cancel it with bmdma_cmd_writeb with BM_CMD_START not
674      * set).
675      *
676      * In the future we'll be able to safely cancel the I/O once
677      * the whole DMA operation is submitted to disk with a single
678      * aio operation using preadv/pwritev.
679      */
680     if (s->bus->dma->aiocb) {
681 #ifdef DEBUG_IDE
682         printf("%s: draining all remaining requests\n", __func__);
683 #endif
684         blk_drain(s->blk);
685         assert(s->bus->dma->aiocb == NULL);
686     }
687 }
688 
689 static void ide_sector_read(IDEState *s);
690 
691 static void ide_sector_read_cb(void *opaque, int ret)
692 {
693     IDEState *s = opaque;
694     int n;
695 
696     s->pio_aiocb = NULL;
697     s->status &= ~BUSY_STAT;
698 
699     if (ret == -ECANCELED) {
700         return;
701     }
702     if (ret != 0) {
703         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
704                                 IDE_RETRY_READ)) {
705             return;
706         }
707     }
708 
709     block_acct_done(blk_get_stats(s->blk), &s->acct);
710 
711     n = s->nsector;
712     if (n > s->req_nb_sectors) {
713         n = s->req_nb_sectors;
714     }
715 
716     ide_set_sector(s, ide_get_sector(s) + n);
717     s->nsector -= n;
718     /* Allow the guest to read the io_buffer */
719     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
720     ide_set_irq(s->bus);
721 }
722 
723 static void ide_sector_read(IDEState *s)
724 {
725     int64_t sector_num;
726     int n;
727 
728     s->status = READY_STAT | SEEK_STAT;
729     s->error = 0; /* not needed by IDE spec, but needed by Windows */
730     sector_num = ide_get_sector(s);
731     n = s->nsector;
732 
733     if (n == 0) {
734         ide_transfer_stop(s);
735         return;
736     }
737 
738     s->status |= BUSY_STAT;
739 
740     if (n > s->req_nb_sectors) {
741         n = s->req_nb_sectors;
742     }
743 
744 #if defined(DEBUG_IDE)
745     printf("sector=%" PRId64 "\n", sector_num);
746 #endif
747 
748     if (!ide_sect_range_ok(s, sector_num, n)) {
749         ide_rw_error(s);
750         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
751         return;
752     }
753 
754     s->iov.iov_base = s->io_buffer;
755     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
756     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
757 
758     block_acct_start(blk_get_stats(s->blk), &s->acct,
759                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
760     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
761                                       ide_sector_read_cb, s);
762 }
763 
764 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
765 {
766     if (s->bus->dma->ops->commit_buf) {
767         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
768     }
769     s->io_buffer_offset += tx_bytes;
770     qemu_sglist_destroy(&s->sg);
771 }
772 
773 void ide_set_inactive(IDEState *s, bool more)
774 {
775     s->bus->dma->aiocb = NULL;
776     ide_clear_retry(s);
777     if (s->bus->dma->ops->set_inactive) {
778         s->bus->dma->ops->set_inactive(s->bus->dma, more);
779     }
780     ide_cmd_done(s);
781 }
782 
783 void ide_dma_error(IDEState *s)
784 {
785     dma_buf_commit(s, 0);
786     ide_abort_command(s);
787     ide_set_inactive(s, false);
788     ide_set_irq(s->bus);
789 }
790 
791 int ide_handle_rw_error(IDEState *s, int error, int op)
792 {
793     bool is_read = (op & IDE_RETRY_READ) != 0;
794     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
795 
796     if (action == BLOCK_ERROR_ACTION_STOP) {
797         assert(s->bus->retry_unit == s->unit);
798         s->bus->error_status = op;
799     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
800         block_acct_failed(blk_get_stats(s->blk), &s->acct);
801         if (IS_IDE_RETRY_DMA(op)) {
802             ide_dma_error(s);
803         } else if (IS_IDE_RETRY_ATAPI(op)) {
804             ide_atapi_io_error(s, -error);
805         } else {
806             ide_rw_error(s);
807         }
808     }
809     blk_error_action(s->blk, action, is_read, error);
810     return action != BLOCK_ERROR_ACTION_IGNORE;
811 }
812 
813 static void ide_dma_cb(void *opaque, int ret)
814 {
815     IDEState *s = opaque;
816     int n;
817     int64_t sector_num;
818     uint64_t offset;
819     bool stay_active = false;
820 
821     if (ret == -ECANCELED) {
822         return;
823     }
824     if (ret < 0) {
825         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
826             s->bus->dma->aiocb = NULL;
827             return;
828         }
829     }
830 
831     n = s->io_buffer_size >> 9;
832     if (n > s->nsector) {
833         /* The PRDs were longer than needed for this request. Shorten them so
834          * we don't get a negative remainder. The Active bit must remain set
835          * after the request completes. */
836         n = s->nsector;
837         stay_active = true;
838     }
839 
840     sector_num = ide_get_sector(s);
841     if (n > 0) {
842         assert(n * 512 == s->sg.size);
843         dma_buf_commit(s, s->sg.size);
844         sector_num += n;
845         ide_set_sector(s, sector_num);
846         s->nsector -= n;
847     }
848 
849     /* end of transfer ? */
850     if (s->nsector == 0) {
851         s->status = READY_STAT | SEEK_STAT;
852         ide_set_irq(s->bus);
853         goto eot;
854     }
855 
856     /* launch next transfer */
857     n = s->nsector;
858     s->io_buffer_index = 0;
859     s->io_buffer_size = n * 512;
860     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
861         /* The PRDs were too short. Reset the Active bit, but don't raise an
862          * interrupt. */
863         s->status = READY_STAT | SEEK_STAT;
864         dma_buf_commit(s, 0);
865         goto eot;
866     }
867 
868 #ifdef DEBUG_AIO
869     printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
870            sector_num, n, s->dma_cmd);
871 #endif
872 
873     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
874         !ide_sect_range_ok(s, sector_num, n)) {
875         ide_dma_error(s);
876         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
877         return;
878     }
879 
880     offset = sector_num << BDRV_SECTOR_BITS;
881     switch (s->dma_cmd) {
882     case IDE_DMA_READ:
883         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
884                                           ide_dma_cb, s);
885         break;
886     case IDE_DMA_WRITE:
887         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
888                                            ide_dma_cb, s);
889         break;
890     case IDE_DMA_TRIM:
891         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
892                                         &s->sg, offset,
893                                         ide_issue_trim, s->blk, ide_dma_cb, s,
894                                         DMA_DIRECTION_TO_DEVICE);
895         break;
896     default:
897         abort();
898     }
899     return;
900 
901 eot:
902     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
903         block_acct_done(blk_get_stats(s->blk), &s->acct);
904     }
905     ide_set_inactive(s, stay_active);
906 }
907 
908 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
909 {
910     s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
911     s->io_buffer_size = 0;
912     s->dma_cmd = dma_cmd;
913 
914     switch (dma_cmd) {
915     case IDE_DMA_READ:
916         block_acct_start(blk_get_stats(s->blk), &s->acct,
917                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
918         break;
919     case IDE_DMA_WRITE:
920         block_acct_start(blk_get_stats(s->blk), &s->acct,
921                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
922         break;
923     default:
924         break;
925     }
926 
927     ide_start_dma(s, ide_dma_cb);
928 }
929 
930 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
931 {
932     s->io_buffer_index = 0;
933     ide_set_retry(s);
934     if (s->bus->dma->ops->start_dma) {
935         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
936     }
937 }
938 
939 static void ide_sector_write(IDEState *s);
940 
941 static void ide_sector_write_timer_cb(void *opaque)
942 {
943     IDEState *s = opaque;
944     ide_set_irq(s->bus);
945 }
946 
947 static void ide_sector_write_cb(void *opaque, int ret)
948 {
949     IDEState *s = opaque;
950     int n;
951 
952     if (ret == -ECANCELED) {
953         return;
954     }
955 
956     s->pio_aiocb = NULL;
957     s->status &= ~BUSY_STAT;
958 
959     if (ret != 0) {
960         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
961             return;
962         }
963     }
964 
965     block_acct_done(blk_get_stats(s->blk), &s->acct);
966 
967     n = s->nsector;
968     if (n > s->req_nb_sectors) {
969         n = s->req_nb_sectors;
970     }
971     s->nsector -= n;
972 
973     ide_set_sector(s, ide_get_sector(s) + n);
974     if (s->nsector == 0) {
975         /* no more sectors to write */
976         ide_transfer_stop(s);
977     } else {
978         int n1 = s->nsector;
979         if (n1 > s->req_nb_sectors) {
980             n1 = s->req_nb_sectors;
981         }
982         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
983                            ide_sector_write);
984     }
985 
986     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
987         /* It seems there is a bug in the Windows 2000 installer HDD
988            IDE driver which fills the disk with empty logs when the
989            IDE write IRQ comes too early. This hack tries to correct
990            that at the expense of slower write performance. Use this
991            option _only_ to install Windows 2000. You must disable it
992            for normal use. */
993         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
994                   (NANOSECONDS_PER_SECOND / 1000));
995     } else {
996         ide_set_irq(s->bus);
997     }
998 }
999 
1000 static void ide_sector_write(IDEState *s)
1001 {
1002     int64_t sector_num;
1003     int n;
1004 
1005     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1006     sector_num = ide_get_sector(s);
1007 #if defined(DEBUG_IDE)
1008     printf("sector=%" PRId64 "\n", sector_num);
1009 #endif
1010     n = s->nsector;
1011     if (n > s->req_nb_sectors) {
1012         n = s->req_nb_sectors;
1013     }
1014 
1015     if (!ide_sect_range_ok(s, sector_num, n)) {
1016         ide_rw_error(s);
1017         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1018         return;
1019     }
1020 
1021     s->iov.iov_base = s->io_buffer;
1022     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1023     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1024 
1025     block_acct_start(blk_get_stats(s->blk), &s->acct,
1026                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1027     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1028                                    &s->qiov, 0, ide_sector_write_cb, s);
1029 }
1030 
1031 static void ide_flush_cb(void *opaque, int ret)
1032 {
1033     IDEState *s = opaque;
1034 
1035     s->pio_aiocb = NULL;
1036 
1037     if (ret == -ECANCELED) {
1038         return;
1039     }
1040     if (ret < 0) {
1041         /* XXX: What sector number to set here? */
1042         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1043             return;
1044         }
1045     }
1046 
1047     if (s->blk) {
1048         block_acct_done(blk_get_stats(s->blk), &s->acct);
1049     }
1050     s->status = READY_STAT | SEEK_STAT;
1051     ide_cmd_done(s);
1052     ide_set_irq(s->bus);
1053 }
1054 
1055 static void ide_flush_cache(IDEState *s)
1056 {
1057     if (s->blk == NULL) {
1058         ide_flush_cb(s, 0);
1059         return;
1060     }
1061 
1062     s->status |= BUSY_STAT;
1063     ide_set_retry(s);
1064     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1065     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1066 }
1067 
1068 static void ide_cfata_metadata_inquiry(IDEState *s)
1069 {
1070     uint16_t *p;
1071     uint32_t spd;
1072 
1073     p = (uint16_t *) s->io_buffer;
1074     memset(p, 0, 0x200);
1075     spd = ((s->mdata_size - 1) >> 9) + 1;
1076 
1077     put_le16(p + 0, 0x0001);			/* Data format revision */
1078     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1079     put_le16(p + 2, s->media_changed);		/* Media status */
1080     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1081     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1082     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1083     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1084 }
1085 
1086 static void ide_cfata_metadata_read(IDEState *s)
1087 {
1088     uint16_t *p;
1089 
1090     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1091         s->status = ERR_STAT;
1092         s->error = ABRT_ERR;
1093         return;
1094     }
1095 
1096     p = (uint16_t *) s->io_buffer;
1097     memset(p, 0, 0x200);
1098 
1099     put_le16(p + 0, s->media_changed);		/* Media status */
1100     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1101                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1102                                     s->nsector << 9), 0x200 - 2));
1103 }
1104 
1105 static void ide_cfata_metadata_write(IDEState *s)
1106 {
1107     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1108         s->status = ERR_STAT;
1109         s->error = ABRT_ERR;
1110         return;
1111     }
1112 
1113     s->media_changed = 0;
1114 
1115     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1116                     s->io_buffer + 2,
1117                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1118                                     s->nsector << 9), 0x200 - 2));
1119 }
1120 
1121 /* called when the inserted state of the media has changed */
1122 static void ide_cd_change_cb(void *opaque, bool load)
1123 {
1124     IDEState *s = opaque;
1125     uint64_t nb_sectors;
1126 
1127     s->tray_open = !load;
1128     blk_get_geometry(s->blk, &nb_sectors);
1129     s->nb_sectors = nb_sectors;
1130 
1131     /*
1132      * First indicate to the guest that a CD has been removed.  That's
1133      * done on the next command the guest sends us.
1134      *
1135      * Then we set UNIT_ATTENTION, by which the guest will
1136      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1137      */
1138     s->cdrom_changed = 1;
1139     s->events.new_media = true;
1140     s->events.eject_request = false;
1141     ide_set_irq(s->bus);
1142 }
1143 
1144 static void ide_cd_eject_request_cb(void *opaque, bool force)
1145 {
1146     IDEState *s = opaque;
1147 
1148     s->events.eject_request = true;
1149     if (force) {
1150         s->tray_locked = false;
1151     }
1152     ide_set_irq(s->bus);
1153 }
1154 
1155 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1156 {
1157     s->lba48 = lba48;
1158 
1159     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1160      * fiddling with the rest of the read logic, we just store the
1161      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1162      */
1163     if (!s->lba48) {
1164 	if (!s->nsector)
1165 	    s->nsector = 256;
1166     } else {
1167 	if (!s->nsector && !s->hob_nsector)
1168 	    s->nsector = 65536;
1169 	else {
1170 	    int lo = s->nsector;
1171 	    int hi = s->hob_nsector;
1172 
1173 	    s->nsector = (hi << 8) | lo;
1174 	}
1175     }
1176 }
1177 
1178 static void ide_clear_hob(IDEBus *bus)
1179 {
1180     /* any write clears HOB high bit of device control register */
1181     bus->ifs[0].select &= ~(1 << 7);
1182     bus->ifs[1].select &= ~(1 << 7);
1183 }
1184 
1185 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1186 {
1187     IDEBus *bus = opaque;
1188 
1189 #ifdef DEBUG_IDE
1190     printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1191 #endif
1192 
1193     addr &= 7;
1194 
1195     /* ignore writes to command block while busy with previous command */
1196     if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1197         return;
1198 
1199     switch(addr) {
1200     case 0:
1201         break;
1202     case 1:
1203 	ide_clear_hob(bus);
1204         /* NOTE: data is written to the two drives */
1205 	bus->ifs[0].hob_feature = bus->ifs[0].feature;
1206 	bus->ifs[1].hob_feature = bus->ifs[1].feature;
1207         bus->ifs[0].feature = val;
1208         bus->ifs[1].feature = val;
1209         break;
1210     case 2:
1211 	ide_clear_hob(bus);
1212 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1213 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1214         bus->ifs[0].nsector = val;
1215         bus->ifs[1].nsector = val;
1216         break;
1217     case 3:
1218 	ide_clear_hob(bus);
1219 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1220 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1221         bus->ifs[0].sector = val;
1222         bus->ifs[1].sector = val;
1223         break;
1224     case 4:
1225 	ide_clear_hob(bus);
1226 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1227 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1228         bus->ifs[0].lcyl = val;
1229         bus->ifs[1].lcyl = val;
1230         break;
1231     case 5:
1232 	ide_clear_hob(bus);
1233 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1234 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1235         bus->ifs[0].hcyl = val;
1236         bus->ifs[1].hcyl = val;
1237         break;
1238     case 6:
1239 	/* FIXME: HOB readback uses bit 7 */
1240         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1241         bus->ifs[1].select = (val | 0x10) | 0xa0;
1242         /* select drive */
1243         bus->unit = (val >> 4) & 1;
1244         break;
1245     default:
1246     case 7:
1247         /* command */
1248         ide_exec_cmd(bus, val);
1249         break;
1250     }
1251 }
1252 
1253 static void ide_reset(IDEState *s)
1254 {
1255 #ifdef DEBUG_IDE
1256     printf("ide: reset\n");
1257 #endif
1258 
1259     if (s->pio_aiocb) {
1260         blk_aio_cancel(s->pio_aiocb);
1261         s->pio_aiocb = NULL;
1262     }
1263 
1264     if (s->drive_kind == IDE_CFATA)
1265         s->mult_sectors = 0;
1266     else
1267         s->mult_sectors = MAX_MULT_SECTORS;
1268     /* ide regs */
1269     s->feature = 0;
1270     s->error = 0;
1271     s->nsector = 0;
1272     s->sector = 0;
1273     s->lcyl = 0;
1274     s->hcyl = 0;
1275 
1276     /* lba48 */
1277     s->hob_feature = 0;
1278     s->hob_sector = 0;
1279     s->hob_nsector = 0;
1280     s->hob_lcyl = 0;
1281     s->hob_hcyl = 0;
1282 
1283     s->select = 0xa0;
1284     s->status = READY_STAT | SEEK_STAT;
1285 
1286     s->lba48 = 0;
1287 
1288     /* ATAPI specific */
1289     s->sense_key = 0;
1290     s->asc = 0;
1291     s->cdrom_changed = 0;
1292     s->packet_transfer_size = 0;
1293     s->elementary_transfer_size = 0;
1294     s->io_buffer_index = 0;
1295     s->cd_sector_size = 0;
1296     s->atapi_dma = 0;
1297     s->tray_locked = 0;
1298     s->tray_open = 0;
1299     /* ATA DMA state */
1300     s->io_buffer_size = 0;
1301     s->req_nb_sectors = 0;
1302 
1303     ide_set_signature(s);
1304     /* init the transfer handler so that 0xffff is returned on data
1305        accesses */
1306     s->end_transfer_func = ide_dummy_transfer_stop;
1307     ide_dummy_transfer_stop(s);
1308     s->media_changed = 0;
1309 }
1310 
1311 static bool cmd_nop(IDEState *s, uint8_t cmd)
1312 {
1313     return true;
1314 }
1315 
1316 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1317 {
1318     /* Halt PIO (in the DRQ phase), then DMA */
1319     ide_transfer_cancel(s);
1320     ide_cancel_dma_sync(s);
1321 
1322     /* Reset any PIO commands, reset signature, etc */
1323     ide_reset(s);
1324 
1325     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1326      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1327     s->status = 0x00;
1328 
1329     /* Do not overwrite status register */
1330     return false;
1331 }
1332 
1333 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1334 {
1335     switch (s->feature) {
1336     case DSM_TRIM:
1337         if (s->blk) {
1338             ide_sector_start_dma(s, IDE_DMA_TRIM);
1339             return false;
1340         }
1341         break;
1342     }
1343 
1344     ide_abort_command(s);
1345     return true;
1346 }
1347 
1348 static bool cmd_identify(IDEState *s, uint8_t cmd)
1349 {
1350     if (s->blk && s->drive_kind != IDE_CD) {
1351         if (s->drive_kind != IDE_CFATA) {
1352             ide_identify(s);
1353         } else {
1354             ide_cfata_identify(s);
1355         }
1356         s->status = READY_STAT | SEEK_STAT;
1357         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1358         ide_set_irq(s->bus);
1359         return false;
1360     } else {
1361         if (s->drive_kind == IDE_CD) {
1362             ide_set_signature(s);
1363         }
1364         ide_abort_command(s);
1365     }
1366 
1367     return true;
1368 }
1369 
1370 static bool cmd_verify(IDEState *s, uint8_t cmd)
1371 {
1372     bool lba48 = (cmd == WIN_VERIFY_EXT);
1373 
1374     /* do sector number check ? */
1375     ide_cmd_lba48_transform(s, lba48);
1376 
1377     return true;
1378 }
1379 
1380 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1381 {
1382     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1383         /* Disable Read and Write Multiple */
1384         s->mult_sectors = 0;
1385     } else if ((s->nsector & 0xff) != 0 &&
1386         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1387          (s->nsector & (s->nsector - 1)) != 0)) {
1388         ide_abort_command(s);
1389     } else {
1390         s->mult_sectors = s->nsector & 0xff;
1391     }
1392 
1393     return true;
1394 }
1395 
1396 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1397 {
1398     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1399 
1400     if (!s->blk || !s->mult_sectors) {
1401         ide_abort_command(s);
1402         return true;
1403     }
1404 
1405     ide_cmd_lba48_transform(s, lba48);
1406     s->req_nb_sectors = s->mult_sectors;
1407     ide_sector_read(s);
1408     return false;
1409 }
1410 
1411 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1412 {
1413     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1414     int n;
1415 
1416     if (!s->blk || !s->mult_sectors) {
1417         ide_abort_command(s);
1418         return true;
1419     }
1420 
1421     ide_cmd_lba48_transform(s, lba48);
1422 
1423     s->req_nb_sectors = s->mult_sectors;
1424     n = MIN(s->nsector, s->req_nb_sectors);
1425 
1426     s->status = SEEK_STAT | READY_STAT;
1427     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1428 
1429     s->media_changed = 1;
1430 
1431     return false;
1432 }
1433 
1434 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1435 {
1436     bool lba48 = (cmd == WIN_READ_EXT);
1437 
1438     if (s->drive_kind == IDE_CD) {
1439         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1440         ide_abort_command(s);
1441         return true;
1442     }
1443 
1444     if (!s->blk) {
1445         ide_abort_command(s);
1446         return true;
1447     }
1448 
1449     ide_cmd_lba48_transform(s, lba48);
1450     s->req_nb_sectors = 1;
1451     ide_sector_read(s);
1452 
1453     return false;
1454 }
1455 
1456 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1457 {
1458     bool lba48 = (cmd == WIN_WRITE_EXT);
1459 
1460     if (!s->blk) {
1461         ide_abort_command(s);
1462         return true;
1463     }
1464 
1465     ide_cmd_lba48_transform(s, lba48);
1466 
1467     s->req_nb_sectors = 1;
1468     s->status = SEEK_STAT | READY_STAT;
1469     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1470 
1471     s->media_changed = 1;
1472 
1473     return false;
1474 }
1475 
1476 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1477 {
1478     bool lba48 = (cmd == WIN_READDMA_EXT);
1479 
1480     if (!s->blk) {
1481         ide_abort_command(s);
1482         return true;
1483     }
1484 
1485     ide_cmd_lba48_transform(s, lba48);
1486     ide_sector_start_dma(s, IDE_DMA_READ);
1487 
1488     return false;
1489 }
1490 
1491 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1492 {
1493     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1494 
1495     if (!s->blk) {
1496         ide_abort_command(s);
1497         return true;
1498     }
1499 
1500     ide_cmd_lba48_transform(s, lba48);
1501     ide_sector_start_dma(s, IDE_DMA_WRITE);
1502 
1503     s->media_changed = 1;
1504 
1505     return false;
1506 }
1507 
1508 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1509 {
1510     ide_flush_cache(s);
1511     return false;
1512 }
1513 
1514 static bool cmd_seek(IDEState *s, uint8_t cmd)
1515 {
1516     /* XXX: Check that seek is within bounds */
1517     return true;
1518 }
1519 
1520 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1521 {
1522     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1523 
1524     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1525     if (s->nb_sectors == 0) {
1526         ide_abort_command(s);
1527         return true;
1528     }
1529 
1530     ide_cmd_lba48_transform(s, lba48);
1531     ide_set_sector(s, s->nb_sectors - 1);
1532 
1533     return true;
1534 }
1535 
1536 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1537 {
1538     s->nsector = 0xff; /* device active or idle */
1539     return true;
1540 }
1541 
1542 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1543 {
1544     uint16_t *identify_data;
1545 
1546     if (!s->blk) {
1547         ide_abort_command(s);
1548         return true;
1549     }
1550 
1551     /* XXX: valid for CDROM ? */
1552     switch (s->feature) {
1553     case 0x02: /* write cache enable */
1554         blk_set_enable_write_cache(s->blk, true);
1555         identify_data = (uint16_t *)s->identify_data;
1556         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1557         return true;
1558     case 0x82: /* write cache disable */
1559         blk_set_enable_write_cache(s->blk, false);
1560         identify_data = (uint16_t *)s->identify_data;
1561         put_le16(identify_data + 85, (1 << 14) | 1);
1562         ide_flush_cache(s);
1563         return false;
1564     case 0xcc: /* reverting to power-on defaults enable */
1565     case 0x66: /* reverting to power-on defaults disable */
1566     case 0xaa: /* read look-ahead enable */
1567     case 0x55: /* read look-ahead disable */
1568     case 0x05: /* set advanced power management mode */
1569     case 0x85: /* disable advanced power management mode */
1570     case 0x69: /* NOP */
1571     case 0x67: /* NOP */
1572     case 0x96: /* NOP */
1573     case 0x9a: /* NOP */
1574     case 0x42: /* enable Automatic Acoustic Mode */
1575     case 0xc2: /* disable Automatic Acoustic Mode */
1576         return true;
1577     case 0x03: /* set transfer mode */
1578         {
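            /*
             * The low 3 bits of the sector count register select the mode
             * number and bits 7:3 the transfer type; mirror the selection
             * into IDENTIFY words 62 (single word DMA), 63 (multiword DMA)
             * and 88 (ultra DMA).
             */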
1579             uint8_t val = s->nsector & 0x07;
1580             identify_data = (uint16_t *)s->identify_data;
1581 
1582             switch (s->nsector >> 3) {
1583             case 0x00: /* pio default */
1584             case 0x01: /* pio mode */
1585                 put_le16(identify_data + 62, 0x07);
1586                 put_le16(identify_data + 63, 0x07);
1587                 put_le16(identify_data + 88, 0x3f);
1588                 break;
1589             case 0x02: /* single word dma mode */
1590                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1591                 put_le16(identify_data + 63, 0x07);
1592                 put_le16(identify_data + 88, 0x3f);
1593                 break;
1594             case 0x04: /* mdma mode */
1595                 put_le16(identify_data + 62, 0x07);
1596                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1597                 put_le16(identify_data + 88, 0x3f);
1598                 break;
1599             case 0x08: /* udma mode */
1600                 put_le16(identify_data + 62, 0x07);
1601                 put_le16(identify_data + 63, 0x07);
1602                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1603                 break;
1604             default:
1605                 goto abort_cmd;
1606             }
1607             return true;
1608         }
1609     }
1610 
1611 abort_cmd:
1612     ide_abort_command(s);
1613     return true;
1614 }
1615 
1616 
1617 /*** ATAPI commands ***/
1618 
1619 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1620 {
1621     ide_atapi_identify(s);
1622     s->status = READY_STAT | SEEK_STAT;
1623     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1624     ide_set_irq(s->bus);
1625     return false;
1626 }
1627 
1628 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1629 {
1630     ide_set_signature(s);
1631 
1632     if (s->drive_kind == IDE_CD) {
1633         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1634                         * devices to return a clear status register
1635                         * with READY_STAT *not* set. */
1636         s->error = 0x01;
1637     } else {
1638         s->status = READY_STAT | SEEK_STAT;
1639         /* The bits of the error register are not as usual for this command!
1640          * They are part of the regular output (this is why ERR_STAT isn't set)
1641          * Device 0 passed, Device 1 passed or not present. */
1642         s->error = 0x01;
1643         ide_set_irq(s->bus);
1644     }
1645 
1646     return false;
1647 }
1648 
1649 static bool cmd_packet(IDEState *s, uint8_t cmd)
1650 {
1651     /* overlapping commands not supported */
1652     if (s->feature & 0x02) {
1653         ide_abort_command(s);
1654         return true;
1655     }
1656 
1657     s->status = READY_STAT | SEEK_STAT;
1658     s->atapi_dma = s->feature & 1;
1659     if (s->atapi_dma) {
1660         s->dma_cmd = IDE_DMA_ATAPI;
1661     }
1662     s->nsector = 1;
1663     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1664                        ide_atapi_cmd);
1665     return false;
1666 }
1667 
1668 
1669 /*** CF-ATA commands ***/
1670 
1671 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1672 {
1673     s->error = 0x09;    /* miscellaneous error */
1674     s->status = READY_STAT | SEEK_STAT;
1675     ide_set_irq(s->bus);
1676 
1677     return false;
1678 }
1679 
1680 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1681 {
1682     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1683      * required for Windows 8 to work with AHCI */
1684 
1685     if (cmd == CFA_WEAR_LEVEL) {
1686         s->nsector = 0;
1687     }
1688 
1689     if (cmd == CFA_ERASE_SECTORS) {
1690         s->media_changed = 1;
1691     }
1692 
1693     return true;
1694 }
1695 
1696 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1697 {
1698     s->status = READY_STAT | SEEK_STAT;
1699 
1700     memset(s->io_buffer, 0, 0x200);
1701     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1702     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1703     s->io_buffer[0x02] = s->select;                 /* Head */
1704     s->io_buffer[0x03] = s->sector;                 /* Sector */
1705     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1706     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1707     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1708     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1709     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1710     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1711     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1712 
1713     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1714     ide_set_irq(s->bus);
1715 
1716     return false;
1717 }
1718 
1719 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1720 {
1721     switch (s->feature) {
1722     case 0x02:  /* Inquiry Metadata Storage */
1723         ide_cfata_metadata_inquiry(s);
1724         break;
1725     case 0x03:  /* Read Metadata Storage */
1726         ide_cfata_metadata_read(s);
1727         break;
1728     case 0x04:  /* Write Metadata Storage */
1729         ide_cfata_metadata_write(s);
1730         break;
1731     default:
1732         ide_abort_command(s);
1733         return true;
1734     }
1735 
1736     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1737     s->status = 0x00; /* NOTE: READY is _not_ set */
1738     ide_set_irq(s->bus);
1739 
1740     return false;
1741 }
1742 
1743 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1744 {
1745     switch (s->feature) {
1746     case 0x01:  /* sense temperature in device */
1747         s->nsector = 0x50;      /* +20 C */
1748         break;
1749     default:
1750         ide_abort_command(s);
1751         return true;
1752     }
1753 
1754     return true;
1755 }
1756 
1757 
1758 /*** SMART commands ***/
1759 
1760 static bool cmd_smart(IDEState *s, uint8_t cmd)
1761 {
1762     int n;
1763 
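    /* SMART commands carry the key 0x4f/0xc2 in the LBA Mid/High (lcyl/hcyl)
     * registers; reject anything that does not present it. */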
1764     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1765         goto abort_cmd;
1766     }
1767 
1768     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1769         goto abort_cmd;
1770     }
1771 
1772     switch (s->feature) {
1773     case SMART_DISABLE:
1774         s->smart_enabled = 0;
1775         return true;
1776 
1777     case SMART_ENABLE:
1778         s->smart_enabled = 1;
1779         return true;
1780 
1781     case SMART_ATTR_AUTOSAVE:
1782         switch (s->sector) {
1783         case 0x00:
1784             s->smart_autosave = 0;
1785             break;
1786         case 0xf1:
1787             s->smart_autosave = 1;
1788             break;
1789         default:
1790             goto abort_cmd;
1791         }
1792         return true;
1793 
1794     case SMART_STATUS:
1795         if (!s->smart_errors) {
1796             s->hcyl = 0xc2;
1797             s->lcyl = 0x4f;
1798         } else {
1799             s->hcyl = 0x2c;
1800             s->lcyl = 0xf4;
1801         }
1802         return true;
1803 
1804     case SMART_READ_THRESH:
1805         memset(s->io_buffer, 0, 0x200);
1806         s->io_buffer[0] = 0x01; /* smart struct version */
1807 
1808         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1809             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1810             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1811         }
1812 
1813         /* checksum: two's complement of the sum of bytes 0..510 */
1814         for (n = 0; n < 511; n++) {
1815             s->io_buffer[511] += s->io_buffer[n];
1816         }
1817         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1818 
1819         s->status = READY_STAT | SEEK_STAT;
1820         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1821         ide_set_irq(s->bus);
1822         return false;
1823 
1824     case SMART_READ_DATA:
1825         memset(s->io_buffer, 0, 0x200);
1826         s->io_buffer[0] = 0x01; /* smart struct version */
1827 
1828         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1829             int i;
1830             for (i = 0; i < 11; i++) {
1831                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1832             }
1833         }
1834 
1835         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1836         if (s->smart_selftest_count == 0) {
1837             s->io_buffer[363] = 0;
1838         } else {
1839             s->io_buffer[363] =
1840                 s->smart_selftest_data[3 +
1841                            (s->smart_selftest_count - 1) *
1842                            24];
1843         }
1844         s->io_buffer[364] = 0x20;
1845         s->io_buffer[365] = 0x01;
1846         /* offline data collection capability: execute + self-test */
1847         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1848         s->io_buffer[368] = 0x03; /* smart capability (1) */
1849         s->io_buffer[369] = 0x00; /* smart capability (2) */
1850         s->io_buffer[370] = 0x01; /* error logging supported */
1851         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1852         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1853         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1854 
1855         for (n = 0; n < 511; n++) {
1856             s->io_buffer[511] += s->io_buffer[n];
1857         }
1858         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1859 
1860         s->status = READY_STAT | SEEK_STAT;
1861         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1862         ide_set_irq(s->bus);
1863         return false;
1864 
1865     case SMART_READ_LOG:
1866         switch (s->sector) {
1867         case 0x01: /* summary smart error log */
1868             memset(s->io_buffer, 0, 0x200);
1869             s->io_buffer[0] = 0x01;
1870             s->io_buffer[1] = 0x00; /* no error entries */
1871             s->io_buffer[452] = s->smart_errors & 0xff;
1872             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1873 
1874             for (n = 0; n < 511; n++) {
1875                 s->io_buffer[511] += s->io_buffer[n];
1876             }
1877             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1878             break;
1879         case 0x06: /* smart self test log */
1880             memset(s->io_buffer, 0, 0x200);
1881             s->io_buffer[0] = 0x01;
1882             if (s->smart_selftest_count == 0) {
1883                 s->io_buffer[508] = 0;
1884             } else {
1885                 s->io_buffer[508] = s->smart_selftest_count;
1886                 for (n = 2; n < 506; n++) {
1887                     s->io_buffer[n] = s->smart_selftest_data[n];
1888                 }
1889             }
1890 
1891             for (n = 0; n < 511; n++) {
1892                 s->io_buffer[511] += s->io_buffer[n];
1893             }
1894             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1895             break;
1896         default:
1897             goto abort_cmd;
1898         }
1899         s->status = READY_STAT | SEEK_STAT;
1900         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1901         ide_set_irq(s->bus);
1902         return false;
1903 
1904     case SMART_EXECUTE_OFFLINE:
1905         switch (s->sector) {
1906         case 0: /* off-line routine */
1907         case 1: /* short self test */
1908         case 2: /* extended self test */
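            /* The self-test log holds up to 21 entries of 24 bytes each;
             * once full, the counter wraps around to the first slot. */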
1909             s->smart_selftest_count++;
1910             if (s->smart_selftest_count > 21) {
1911                 s->smart_selftest_count = 1;
1912             }
1913             n = 2 + (s->smart_selftest_count - 1) * 24;
1914             s->smart_selftest_data[n] = s->sector;
1915             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1916             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1917             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1918             break;
1919         default:
1920             goto abort_cmd;
1921         }
1922         return true;
1923     }
1924 
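    /* Reached via the gotos above, or by falling out of the switch on an
     * unrecognized SMART subcommand. */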
1925 abort_cmd:
1926     ide_abort_command(s);
1927     return true;
1928 }
1929 
1930 #define HD_OK (1u << IDE_HD)
1931 #define CD_OK (1u << IDE_CD)
1932 #define CFA_OK (1u << IDE_CFATA)
1933 #define HD_CFA_OK (HD_OK | CFA_OK)
1934 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1935 
1936 /* Set the Disk Seek Completed status bit during completion */
1937 #define SET_DSC (1u << 8)
1938 
1939 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1940 static const struct {
1941     /* Returns true if the completion code should be run */
1942     bool (*handler)(IDEState *s, uint8_t cmd);
1943     int flags;
1944 } ide_cmd_table[0x100] = {
1945     /* NOP not implemented, mandatory for CD */
1946     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1947     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1948     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1949     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1950     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1951     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1952     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1953     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1954     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1955     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1956     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1957     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1958     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1959     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1960     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1961     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
1962     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
1963     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
1964     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
1965     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
1966     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
1967     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
1968     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
1969     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
1970     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
1971     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
1972     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
1973     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
1974     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1975     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
1976     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
1977     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
1978     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
1979     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1980     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1981     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
1982     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
1983     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1984     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
1985     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
1986     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
1987     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
1988     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
1989     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
1990     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
1991     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
1992     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
1993     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1994     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
1995     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
1996     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
1997     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
1998     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
1999     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2000     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2001     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2002 };
2003 
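/* The low byte of each flags entry is a bitmask of the drive kinds (HD_OK,
 * CD_OK, CFA_OK) allowed to issue that command; unlisted opcodes have a zero
 * mask and are therefore rejected. */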
2004 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2005 {
2006     return cmd < ARRAY_SIZE(ide_cmd_table)
2007         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2008 }
2009 
2010 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2011 {
2012     IDEState *s;
2013     bool complete;
2014 
2015 #if defined(DEBUG_IDE)
2016     printf("ide: CMD=%02x\n", val);
2017 #endif
2018     s = idebus_active_if(bus);
2019     /* ignore commands to a non-existent slave */
2020     if (s != bus->ifs && !s->blk) {
2021         return;
2022     }
2023 
2024     /* Only RESET is allowed while BSY and/or DRQ are set,
2025      * and only to ATAPI devices. */
2026     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2027         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2028             return;
2029         }
2030     }
2031 
2032     if (!ide_cmd_permitted(s, val)) {
2033         ide_abort_command(s);
2034         ide_set_irq(s->bus);
2035         return;
2036     }
2037 
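    /* Start executing the command: BSY stays set while the handler runs, and
     * handlers that finish synchronously return true so that completion
     * (status, DSC, IRQ) is performed right below. */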
2038     s->status = READY_STAT | BUSY_STAT;
2039     s->error = 0;
2040     s->io_buffer_offset = 0;
2041 
2042     complete = ide_cmd_table[val].handler(s, val);
2043     if (complete) {
2044         s->status &= ~BUSY_STAT;
2045         assert(!!s->error == !!(s->status & ERR_STAT));
2046 
2047         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2048             s->status |= SEEK_STAT;
2049         }
2050 
2051         ide_cmd_done(s);
2052         ide_set_irq(s->bus);
2053     }
2054 }
2055 
2056 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
2057 {
2058     IDEBus *bus = opaque;
2059     IDEState *s = idebus_active_if(bus);
2060     uint32_t addr;
2061     int ret, hob;
2062 
2063     addr = addr1 & 7;
2064     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2065     //hob = s->select & (1 << 7);
2066     hob = 0;
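    /* Task file layout for addr 1..7: error, sector count, sector number,
     * cylinder low, cylinder high, drive/head select, status (reading the
     * status register also deasserts the IRQ); byte reads of the data port
     * at addr 0 just return 0xff. */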
2067     switch (addr) {
2068     case 0:
2069         ret = 0xff;
2070         break;
2071     case 1:
2072         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2073             (s != bus->ifs && !s->blk)) {
2074             ret = 0;
2075         } else if (!hob) {
2076             ret = s->error;
2077         } else {
2078             ret = s->hob_feature;
2079         }
2080         break;
2081     case 2:
2082         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2083             ret = 0;
2084         } else if (!hob) {
2085             ret = s->nsector & 0xff;
2086         } else {
2087             ret = s->hob_nsector;
2088         }
2089         break;
2090     case 3:
2091         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2092             ret = 0;
2093         } else if (!hob) {
2094             ret = s->sector;
2095         } else {
2096             ret = s->hob_sector;
2097         }
2098         break;
2099     case 4:
2100         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2101             ret = 0;
2102         } else if (!hob) {
2103             ret = s->lcyl;
2104         } else {
2105             ret = s->hob_lcyl;
2106         }
2107         break;
2108     case 5:
2109         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2110             ret = 0;
2111         } else if (!hob) {
2112             ret = s->hcyl;
2113         } else {
2114             ret = s->hob_hcyl;
2115         }
2116         break;
2117     case 6:
2118         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2119             ret = 0;
2120         } else {
2121             ret = s->select;
2122         }
2123         break;
2124     default:
2125     case 7:
2126         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2127             (s != bus->ifs && !s->blk)) {
2128             ret = 0;
2129         } else {
2130             ret = s->status;
2131         }
2132         qemu_irq_lower(bus->irq);
2133         break;
2134     }
2135 #ifdef DEBUG_IDE
2136     printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
2137 #endif
2138     return ret;
2139 }
2140 
2141 uint32_t ide_status_read(void *opaque, uint32_t addr)
2142 {
2143     IDEBus *bus = opaque;
2144     IDEState *s = idebus_active_if(bus);
2145     int ret;
2146 
2147     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2148         (s != bus->ifs && !s->blk)) {
2149         ret = 0;
2150     } else {
2151         ret = s->status;
2152     }
2153 #ifdef DEBUG_IDE
2154     printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2155 #endif
2156     return ret;
2157 }
2158 
2159 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2160 {
2161     IDEBus *bus = opaque;
2162     IDEState *s;
2163     int i;
2164 
2165 #ifdef DEBUG_IDE
2166     printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2167 #endif
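    /* IDE_CMD_RESET corresponds to SRST in the device control register; the
     * reset is edge-triggered, so only the 0->1 and 1->0 transitions of the
     * bit are acted upon. */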
2168     /* common for both drives */
2169     if (!(bus->cmd & IDE_CMD_RESET) &&
2170         (val & IDE_CMD_RESET)) {
2171         /* reset low to high */
2172         for (i = 0; i < 2; i++) {
2173             s = &bus->ifs[i];
2174             s->status = BUSY_STAT | SEEK_STAT;
2175             s->error = 0x01;
2176         }
2177     } else if ((bus->cmd & IDE_CMD_RESET) &&
2178                !(val & IDE_CMD_RESET)) {
2179         /* high to low */
2180         for (i = 0; i < 2; i++) {
2181             s = &bus->ifs[i];
2182             if (s->drive_kind == IDE_CD)
2183                 s->status = 0x00; /* NOTE: READY is _not_ set */
2184             else
2185                 s->status = READY_STAT | SEEK_STAT;
2186             ide_set_signature(s);
2187         }
2188     }
2189 
2190     bus->cmd = val;
2191 }
2192 
2193 /*
2194  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2195  * transferred from the device to the guest), false if it's a PIO in
2196  */
2197 static bool ide_is_pio_out(IDEState *s)
2198 {
2199     if (s->end_transfer_func == ide_sector_write ||
2200         s->end_transfer_func == ide_atapi_cmd) {
2201         return false;
2202     } else if (s->end_transfer_func == ide_sector_read ||
2203                s->end_transfer_func == ide_transfer_stop ||
2204                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2205                s->end_transfer_func == ide_dummy_transfer_stop) {
2206         return true;
2207     }
2208 
2209     abort();
2210 }
2211 
2212 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2213 {
2214     IDEBus *bus = opaque;
2215     IDEState *s = idebus_active_if(bus);
2216     uint8_t *p;
2217 
2218     /* PIO data access allowed only when DRQ bit is set. The result of a write
2219      * during PIO out is indeterminate, just ignore it. */
2220     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2221         return;
2222     }
2223 
2224     p = s->data_ptr;
2225     if (p + 2 > s->data_end) {
2226         return;
2227     }
2228 
2229     *(uint16_t *)p = le16_to_cpu(val);
2230     p += 2;
2231     s->data_ptr = p;
2232     if (p >= s->data_end) {
2233         s->status &= ~DRQ_STAT;
2234         s->end_transfer_func(s);
2235     }
2236 }
2237 
2238 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2239 {
2240     IDEBus *bus = opaque;
2241     IDEState *s = idebus_active_if(bus);
2242     uint8_t *p;
2243     int ret;
2244 
2245     /* PIO data access allowed only when DRQ bit is set. The result of a read
2246      * during PIO in is indeterminate, return 0 and don't move forward. */
2247     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2248         return 0;
2249     }
2250 
2251     p = s->data_ptr;
2252     if (p + 2 > s->data_end) {
2253         return 0;
2254     }
2255 
2256     ret = cpu_to_le16(*(uint16_t *)p);
2257     p += 2;
2258     s->data_ptr = p;
2259     if (p >= s->data_end) {
2260         s->status &= ~DRQ_STAT;
2261         s->end_transfer_func(s);
2262     }
2263     return ret;
2264 }
2265 
2266 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2267 {
2268     IDEBus *bus = opaque;
2269     IDEState *s = idebus_active_if(bus);
2270     uint8_t *p;
2271 
2272     /* PIO data access allowed only when DRQ bit is set. The result of a write
2273      * during PIO out is indeterminate, just ignore it. */
2274     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2275         return;
2276     }
2277 
2278     p = s->data_ptr;
2279     if (p + 4 > s->data_end) {
2280         return;
2281     }
2282 
2283     *(uint32_t *)p = le32_to_cpu(val);
2284     p += 4;
2285     s->data_ptr = p;
2286     if (p >= s->data_end) {
2287         s->status &= ~DRQ_STAT;
2288         s->end_transfer_func(s);
2289     }
2290 }
2291 
2292 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2293 {
2294     IDEBus *bus = opaque;
2295     IDEState *s = idebus_active_if(bus);
2296     uint8_t *p;
2297     int ret;
2298 
2299     /* PIO data access allowed only when DRQ bit is set. The result of a read
2300      * during PIO in is indeterminate, return 0 and don't move forward. */
2301     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2302         return 0;
2303     }
2304 
2305     p = s->data_ptr;
2306     if (p + 4 > s->data_end) {
2307         return 0;
2308     }
2309 
2310     ret = cpu_to_le32(*(uint32_t *)p);
2311     p += 4;
2312     s->data_ptr = p;
2313     if (p >= s->data_end) {
2314         s->status &= ~DRQ_STAT;
2315         s->end_transfer_func(s);
2316     }
2317     return ret;
2318 }
2319 
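/* Catch-all end-of-transfer handler used when no real command transfer is
 * active: it rewinds the data pointers and leaves 0xff in the buffer. */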
2320 static void ide_dummy_transfer_stop(IDEState *s)
2321 {
2322     s->data_ptr = s->io_buffer;
2323     s->data_end = s->io_buffer;
2324     s->io_buffer[0] = 0xff;
2325     s->io_buffer[1] = 0xff;
2326     s->io_buffer[2] = 0xff;
2327     s->io_buffer[3] = 0xff;
2328 }
2329 
2330 void ide_bus_reset(IDEBus *bus)
2331 {
2332     bus->unit = 0;
2333     bus->cmd = 0;
2334     ide_reset(&bus->ifs[0]);
2335     ide_reset(&bus->ifs[1]);
2336     ide_clear_hob(bus);
2337 
2338     /* pending async DMA */
2339     if (bus->dma->aiocb) {
2340 #ifdef DEBUG_AIO
2341         printf("aio_cancel\n");
2342 #endif
2343         blk_aio_cancel(bus->dma->aiocb);
2344         bus->dma->aiocb = NULL;
2345     }
2346 
2347     /* reset dma provider too */
2348     if (bus->dma->ops->reset) {
2349         bus->dma->ops->reset(bus->dma);
2350     }
2351 }
2352 
2353 static bool ide_cd_is_tray_open(void *opaque)
2354 {
2355     return ((IDEState *)opaque)->tray_open;
2356 }
2357 
2358 static bool ide_cd_is_medium_locked(void *opaque)
2359 {
2360     return ((IDEState *)opaque)->tray_locked;
2361 }
2362 
2363 static void ide_resize_cb(void *opaque)
2364 {
2365     IDEState *s = opaque;
2366     uint64_t nb_sectors;
2367 
2368     if (!s->identify_set) {
2369         return;
2370     }
2371 
2372     blk_get_geometry(s->blk, &nb_sectors);
2373     s->nb_sectors = nb_sectors;
2374 
2375     /* Update the identify data buffer. */
2376     if (s->drive_kind == IDE_CFATA) {
2377         ide_cfata_identify_size(s);
2378     } else {
2379         /* IDE_CD uses a different set of callbacks entirely. */
2380         assert(s->drive_kind != IDE_CD);
2381         ide_identify_size(s);
2382     }
2383 }
2384 
2385 static const BlockDevOps ide_cd_block_ops = {
2386     .change_media_cb = ide_cd_change_cb,
2387     .eject_request_cb = ide_cd_eject_request_cb,
2388     .is_tray_open = ide_cd_is_tray_open,
2389     .is_medium_locked = ide_cd_is_medium_locked,
2390 };
2391 
2392 static const BlockDevOps ide_hd_block_ops = {
2393     .resize_cb = ide_resize_cb,
2394 };
2395 
2396 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2397                    const char *version, const char *serial, const char *model,
2398                    uint64_t wwn,
2399                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2400                    int chs_trans)
2401 {
2402     uint64_t nb_sectors;
2403 
2404     s->blk = blk;
2405     s->drive_kind = kind;
2406 
2407     blk_get_geometry(blk, &nb_sectors);
2408     s->cylinders = cylinders;
2409     s->heads = heads;
2410     s->sectors = secs;
2411     s->chs_trans = chs_trans;
2412     s->nb_sectors = nb_sectors;
2413     s->wwn = wwn;
2414     /* The SMART values should be preserved across power cycles
2415        but they aren't.  */
2416     s->smart_enabled = 1;
2417     s->smart_autosave = 1;
2418     s->smart_errors = 0;
2419     s->smart_selftest_count = 0;
2420     if (kind == IDE_CD) {
2421         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2422         blk_set_guest_block_size(blk, 2048);
2423     } else {
2424         if (!blk_is_inserted(s->blk)) {
2425             error_report("Device needs media, but drive is empty");
2426             return -1;
2427         }
2428         if (blk_is_read_only(blk)) {
2429             error_report("Can't use a read-only drive");
2430             return -1;
2431         }
2432         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2433     }
2434     if (serial) {
2435         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2436     } else {
2437         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2438                  "QM%05d", s->drive_serial);
2439     }
2440     if (model) {
2441         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2442     } else {
2443         switch (kind) {
2444         case IDE_CD:
2445             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2446             break;
2447         case IDE_CFATA:
2448             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2449             break;
2450         default:
2451             strcpy(s->drive_model_str, "QEMU HARDDISK");
2452             break;
2453         }
2454     }
2455 
2456     if (version) {
2457         pstrcpy(s->version, sizeof(s->version), version);
2458     } else {
2459         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2460     }
2461 
2462     ide_reset(s);
2463     blk_iostatus_enable(blk);
2464     return 0;
2465 }
2466 
2467 static void ide_init1(IDEBus *bus, int unit)
2468 {
2469     static int drive_serial = 1;
2470     IDEState *s = &bus->ifs[unit];
2471 
2472     s->bus = bus;
2473     s->unit = unit;
2474     s->drive_serial = drive_serial++;
2475     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2476     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2477     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2478     memset(s->io_buffer, 0, s->io_buffer_total_len);
2479 
2480     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2481     memset(s->smart_selftest_data, 0, 512);
2482 
2483     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2484                                            ide_sector_write_timer_cb, s);
2485 }
2486 
2487 static int ide_nop_int(IDEDMA *dma, int x)
2488 {
2489     return 0;
2490 }
2491 
2492 static void ide_nop(IDEDMA *dma)
2493 {
2494 }
2495 
2496 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2497 {
2498     return 0;
2499 }
2500 
2501 static const IDEDMAOps ide_dma_nop_ops = {
2502     .prepare_buf    = ide_nop_int32,
2503     .restart_dma    = ide_nop,
2504     .rw_buf         = ide_nop_int,
2505 };
2506 
2507 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2508 {
2509     s->unit = s->bus->retry_unit;
2510     ide_set_sector(s, s->bus->retry_sector_num);
2511     s->nsector = s->bus->retry_nsector;
2512     s->bus->dma->ops->restart_dma(s->bus->dma);
2513     s->io_buffer_size = 0;
2514     s->dma_cmd = dma_cmd;
2515     ide_start_dma(s, ide_dma_cb);
2516 }
2517 
2518 static void ide_restart_bh(void *opaque)
2519 {
2520     IDEBus *bus = opaque;
2521     IDEState *s;
2522     bool is_read;
2523     int error_status;
2524 
2525     qemu_bh_delete(bus->bh);
2526     bus->bh = NULL;
2527 
2528     error_status = bus->error_status;
2529     if (bus->error_status == 0) {
2530         return;
2531     }
2532 
2533     s = idebus_active_if(bus);
2534     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2535 
2536     /* The error status must be cleared before resubmitting the request: The
2537      * request may fail again, and this case can only be distinguished if the
2538      * called function can set a new error status. */
2539     bus->error_status = 0;
2540 
2541     /* The HBA has generically asked to be kicked on retry */
2542     if (error_status & IDE_RETRY_HBA) {
2543         if (s->bus->dma->ops->restart) {
2544             s->bus->dma->ops->restart(s->bus->dma);
2545         }
2546     } else if (IS_IDE_RETRY_DMA(error_status)) {
2547         if (error_status & IDE_RETRY_TRIM) {
2548             ide_restart_dma(s, IDE_DMA_TRIM);
2549         } else {
2550             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2551         }
2552     } else if (IS_IDE_RETRY_PIO(error_status)) {
2553         if (is_read) {
2554             ide_sector_read(s);
2555         } else {
2556             ide_sector_write(s);
2557         }
2558     } else if (error_status & IDE_RETRY_FLUSH) {
2559         ide_flush_cache(s);
2560     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2561         assert(s->end_transfer_func == ide_atapi_cmd);
2562         ide_atapi_dma_restart(s);
2563     } else {
2564         abort();
2565     }
2566 }
2567 
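/* VM run-state change handler: once the guest is running again, schedule a
 * bottom half that re-submits whatever request is recorded in
 * bus->error_status (typically set when a request failed with a
 * werror/rerror=stop policy). */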
2568 static void ide_restart_cb(void *opaque, int running, RunState state)
2569 {
2570     IDEBus *bus = opaque;
2571 
2572     if (!running)
2573         return;
2574 
2575     if (!bus->bh) {
2576         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2577         qemu_bh_schedule(bus->bh);
2578     }
2579 }
2580 
2581 void ide_register_restart_cb(IDEBus *bus)
2582 {
2583     if (bus->dma->ops->restart_dma) {
2584         qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2585     }
2586 }
2587 
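/* Default no-op DMA provider; ide_init2() installs it so that bus->dma is
 * never NULL until an HBA replaces it with its own implementation. */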
2588 static IDEDMA ide_dma_nop = {
2589     .ops = &ide_dma_nop_ops,
2590     .aiocb = NULL,
2591 };
2592 
2593 void ide_init2(IDEBus *bus, qemu_irq irq)
2594 {
2595     int i;
2596 
2597     for (i = 0; i < 2; i++) {
2598         ide_init1(bus, i);
2599         ide_reset(&bus->ifs[i]);
2600     }
2601     bus->irq = irq;
2602     bus->dma = &ide_dma_nop;
2603 }
2604 
2605 static const MemoryRegionPortio ide_portio_list[] = {
2606     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2607     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2608     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2609     PORTIO_END_OF_LIST(),
2610 };
2611 
2612 static const MemoryRegionPortio ide_portio2_list[] = {
2613     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2614     PORTIO_END_OF_LIST(),
2615 };
2616 
2617 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2618 {
2619     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2620        bridge has been set up properly to always register with ISA. */
2621     isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2622 
2623     if (iobase2) {
2624         isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2625     }
2626 }
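
/*
 * Typical wiring by a controller model, shown here only as a minimal,
 * illustrative sketch: the ISA device, IRQ line, geometry and BlockBackend
 * variables below are placeholders, not definitions from this file.
 *
 *   ide_init2(bus, isa_get_irq(isadev, 14));
 *   ide_init_ioport(bus, isadev, 0x1f0, 0x3f6);
 *   ide_init_drive(&bus->ifs[0], blk, IDE_HD, NULL, "QM00001", NULL, 0,
 *                  cyls, heads, secs, BIOS_ATA_TRANSLATION_AUTO);
 */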
2627 
2628 static bool is_identify_set(void *opaque, int version_id)
2629 {
2630     IDEState *s = opaque;
2631 
2632     return s->identify_set != 0;
2633 }
2634 
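/* end_transfer_func is migrated as an index into this table, since a raw
 * function pointer cannot be stored in the migration stream. */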
2635 static EndTransferFunc* transfer_end_table[] = {
2636         ide_sector_read,
2637         ide_sector_write,
2638         ide_transfer_stop,
2639         ide_atapi_cmd_reply_end,
2640         ide_atapi_cmd,
2641         ide_dummy_transfer_stop,
2642 };
2643 
2644 static int transfer_end_table_idx(EndTransferFunc *fn)
2645 {
2646     int i;
2647 
2648     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2649         if (transfer_end_table[i] == fn)
2650             return i;
2651 
2652     return -1;
2653 }
2654 
2655 static int ide_drive_post_load(void *opaque, int version_id)
2656 {
2657     IDEState *s = opaque;
2658 
2659     if (s->blk && s->identify_set) {
2660         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2661     }
2662     return 0;
2663 }
2664 
2665 static int ide_drive_pio_post_load(void *opaque, int version_id)
2666 {
2667     IDEState *s = opaque;
2668 
2669     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2670         return -EINVAL;
2671     }
2672     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2673     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2674     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2675     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2676 
2677     return 0;
2678 }
2679 
2680 static void ide_drive_pio_pre_save(void *opaque)
2681 {
2682     IDEState *s = opaque;
2683     int idx;
2684 
2685     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2686     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2687 
2688     idx = transfer_end_table_idx(s->end_transfer_func);
2689     if (idx == -1) {
2690         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2691                         __func__);
2692         s->end_transfer_fn_idx = 2;
2693     } else {
2694         s->end_transfer_fn_idx = idx;
2695     }
2696 }
2697 
2698 static bool ide_drive_pio_state_needed(void *opaque)
2699 {
2700     IDEState *s = opaque;
2701 
2702     return ((s->status & DRQ_STAT) != 0)
2703         || (s->bus->error_status & IDE_RETRY_PIO);
2704 }
2705 
2706 static bool ide_tray_state_needed(void *opaque)
2707 {
2708     IDEState *s = opaque;
2709 
2710     return s->tray_open || s->tray_locked;
2711 }
2712 
2713 static bool ide_atapi_gesn_needed(void *opaque)
2714 {
2715     IDEState *s = opaque;
2716 
2717     return s->events.new_media || s->events.eject_request;
2718 }
2719 
2720 static bool ide_error_needed(void *opaque)
2721 {
2722     IDEBus *bus = opaque;
2723 
2724     return (bus->error_status != 0);
2725 }
2726 
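/* The subsections below are only emitted when their .needed callback returns
 * true, keeping the migration stream compatible with destinations that do not
 * know about them. */
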
2727 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2728 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2729     .name = "ide_drive/atapi/gesn_state",
2730     .version_id = 1,
2731     .minimum_version_id = 1,
2732     .needed = ide_atapi_gesn_needed,
2733     .fields = (VMStateField[]) {
2734         VMSTATE_BOOL(events.new_media, IDEState),
2735         VMSTATE_BOOL(events.eject_request, IDEState),
2736         VMSTATE_END_OF_LIST()
2737     }
2738 };
2739 
2740 static const VMStateDescription vmstate_ide_tray_state = {
2741     .name = "ide_drive/tray_state",
2742     .version_id = 1,
2743     .minimum_version_id = 1,
2744     .needed = ide_tray_state_needed,
2745     .fields = (VMStateField[]) {
2746         VMSTATE_BOOL(tray_open, IDEState),
2747         VMSTATE_BOOL(tray_locked, IDEState),
2748         VMSTATE_END_OF_LIST()
2749     }
2750 };
2751 
2752 static const VMStateDescription vmstate_ide_drive_pio_state = {
2753     .name = "ide_drive/pio_state",
2754     .version_id = 1,
2755     .minimum_version_id = 1,
2756     .pre_save = ide_drive_pio_pre_save,
2757     .post_load = ide_drive_pio_post_load,
2758     .needed = ide_drive_pio_state_needed,
2759     .fields = (VMStateField[]) {
2760         VMSTATE_INT32(req_nb_sectors, IDEState),
2761         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2762                              vmstate_info_uint8, uint8_t),
2763         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2764         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2765         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2766         VMSTATE_INT32(elementary_transfer_size, IDEState),
2767         VMSTATE_INT32(packet_transfer_size, IDEState),
2768         VMSTATE_END_OF_LIST()
2769     }
2770 };
2771 
2772 const VMStateDescription vmstate_ide_drive = {
2773     .name = "ide_drive",
2774     .version_id = 3,
2775     .minimum_version_id = 0,
2776     .post_load = ide_drive_post_load,
2777     .fields = (VMStateField[]) {
2778         VMSTATE_INT32(mult_sectors, IDEState),
2779         VMSTATE_INT32(identify_set, IDEState),
2780         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2781         VMSTATE_UINT8(feature, IDEState),
2782         VMSTATE_UINT8(error, IDEState),
2783         VMSTATE_UINT32(nsector, IDEState),
2784         VMSTATE_UINT8(sector, IDEState),
2785         VMSTATE_UINT8(lcyl, IDEState),
2786         VMSTATE_UINT8(hcyl, IDEState),
2787         VMSTATE_UINT8(hob_feature, IDEState),
2788         VMSTATE_UINT8(hob_sector, IDEState),
2789         VMSTATE_UINT8(hob_nsector, IDEState),
2790         VMSTATE_UINT8(hob_lcyl, IDEState),
2791         VMSTATE_UINT8(hob_hcyl, IDEState),
2792         VMSTATE_UINT8(select, IDEState),
2793         VMSTATE_UINT8(status, IDEState),
2794         VMSTATE_UINT8(lba48, IDEState),
2795         VMSTATE_UINT8(sense_key, IDEState),
2796         VMSTATE_UINT8(asc, IDEState),
2797         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2798         VMSTATE_END_OF_LIST()
2799     },
2800     .subsections = (const VMStateDescription*[]) {
2801         &vmstate_ide_drive_pio_state,
2802         &vmstate_ide_tray_state,
2803         &vmstate_ide_atapi_gesn_state,
2804         NULL
2805     }
2806 };
2807 
2808 static const VMStateDescription vmstate_ide_error_status = {
2809     .name = "ide_bus/error",
2810     .version_id = 2,
2811     .minimum_version_id = 1,
2812     .needed = ide_error_needed,
2813     .fields = (VMStateField[]) {
2814         VMSTATE_INT32(error_status, IDEBus),
2815         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2816         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2817         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2818         VMSTATE_END_OF_LIST()
2819     }
2820 };
2821 
2822 const VMStateDescription vmstate_ide_bus = {
2823     .name = "ide_bus",
2824     .version_id = 1,
2825     .minimum_version_id = 1,
2826     .fields = (VMStateField[]) {
2827         VMSTATE_UINT8(cmd, IDEBus),
2828         VMSTATE_UINT8(unit, IDEBus),
2829         VMSTATE_END_OF_LIST()
2830     },
2831     .subsections = (const VMStateDescription*[]) {
2832         &vmstate_ide_error_status,
2833         NULL
2834     }
2835 };
2836 
2837 void ide_drive_get(DriveInfo **hd, int n)
2838 {
2839     int i;
2840     int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2841     int max_devs = drive_get_max_devs(IF_IDE);
2842     int n_buses = max_devs ? (n / max_devs) : n;
2843 
2844     /*
2845      * Note: The number of actual buses available is not known.
2846      * We compute this based on the size of the DriveInfo* array, n.
2847      * If it is less than max_devs * <num_real_buses>,
2848      * we will stop looking for drives prematurely instead of overfilling
2849      * the array.
2850      */
2851 
2852     if (highest_bus > n_buses) {
2853         error_report("Too many IDE buses defined (%d > %d)",
2854                      highest_bus, n_buses);
2855         exit(1);
2856     }
2857 
2858     for (i = 0; i < n; i++) {
2859         hd[i] = drive_get_by_index(IF_IDE, i);
2860     }
2861 }
2862