xref: /openbmc/qemu/hw/ide/core.c (revision 74642d091a9e7d5ece11ced18193e1f37d7e0553)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 
40 /* These values were based on a Seagate ST3500418AS but have been modified
41    to make more sense in QEMU */
42 static const int smart_attributes[][12] = {
43     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
44     /* raw read error rate*/
45     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
46     /* spin up */
47     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
48     /* start stop count */
49     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
50     /* remapped sectors */
51     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
52     /* power on hours */
53     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54     /* power cycle count */
55     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* airflow-temperature-celsius */
57     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 };
59 
60 static void ide_dummy_transfer_stop(IDEState *s);
61 
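/*
 * Fill an ATA IDENTIFY string field: pad 'src' with spaces to 'len' bytes
 * and swap every byte pair (str[i ^ 1]), because ATA strings store the
 * first character of each pair in the upper byte of the little-endian
 * 16-bit identify word.
 */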
62 static void padstr(char *str, const char *src, int len)
63 {
64     int i, v;
65     for(i = 0; i < len; i++) {
66         if (*src)
67             v = *src++;
68         else
69             v = ' ';
70         str[i^1] = v;
71     }
72 }
73 
74 static void put_le16(uint16_t *p, unsigned int v)
75 {
76     *p = cpu_to_le16(v);
77 }
78 
79 static void ide_identify_size(IDEState *s)
80 {
81     uint16_t *p = (uint16_t *)s->identify_data;
82     put_le16(p + 60, s->nb_sectors);
83     put_le16(p + 61, s->nb_sectors >> 16);
84     put_le16(p + 100, s->nb_sectors);
85     put_le16(p + 101, s->nb_sectors >> 16);
86     put_le16(p + 102, s->nb_sectors >> 32);
87     put_le16(p + 103, s->nb_sectors >> 48);
88 }
89 
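/*
 * Build the 512-byte ATA IDENTIFY DEVICE data for a hard disk.  The words
 * are generated once and cached in s->identify_data (guarded by
 * s->identify_set); the capacity words are kept up to date by
 * ide_identify_size().  Each call ends by copying the cached data into
 * s->io_buffer for the guest to read.
 */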
90 static void ide_identify(IDEState *s)
91 {
92     uint16_t *p;
93     unsigned int oldsize;
94     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
95 
96     p = (uint16_t *)s->identify_data;
97     if (s->identify_set) {
98         goto fill_buffer;
99     }
100     memset(p, 0, sizeof(s->identify_data));
101 
102     put_le16(p + 0, 0x0040);
103     put_le16(p + 1, s->cylinders);
104     put_le16(p + 3, s->heads);
105     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
106     put_le16(p + 5, 512); /* XXX: retired, remove ? */
107     put_le16(p + 6, s->sectors);
108     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
109     put_le16(p + 20, 3); /* XXX: retired, remove ? */
110     put_le16(p + 21, 512); /* cache size in sectors */
111     put_le16(p + 22, 4); /* ecc bytes */
112     padstr((char *)(p + 23), s->version, 8); /* firmware version */
113     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
114 #if MAX_MULT_SECTORS > 1
115     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
116 #endif
117     put_le16(p + 48, 1); /* dword I/O */
118     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
119     put_le16(p + 51, 0x200); /* PIO transfer cycle */
120     put_le16(p + 52, 0x200); /* DMA transfer cycle */
121     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
122     put_le16(p + 54, s->cylinders);
123     put_le16(p + 55, s->heads);
124     put_le16(p + 56, s->sectors);
125     oldsize = s->cylinders * s->heads * s->sectors;
126     put_le16(p + 57, oldsize);
127     put_le16(p + 58, oldsize >> 16);
128     if (s->mult_sectors)
129         put_le16(p + 59, 0x100 | s->mult_sectors);
130     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
131     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
132     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
133     put_le16(p + 63, 0x07); /* mdma0-2 supported */
134     put_le16(p + 64, 0x03); /* pio3-4 supported */
135     put_le16(p + 65, 120);
136     put_le16(p + 66, 120);
137     put_le16(p + 67, 120);
138     put_le16(p + 68, 120);
139     if (dev && dev->conf.discard_granularity) {
140         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
141     }
142 
143     if (s->ncq_queues) {
144         put_le16(p + 75, s->ncq_queues - 1);
145         /* NCQ supported */
146         put_le16(p + 76, (1 << 8));
147     }
148 
149     put_le16(p + 80, 0xf0); /* ata4 -> ata7 supported */
150     put_le16(p + 81, 0x16); /* conforms to ata5 */
151     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
152     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
153     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
154     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
155     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
156     if (s->wwn) {
157         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
158     } else {
159         put_le16(p + 84, (1 << 14) | 0);
160     }
161     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
162     if (blk_enable_write_cache(s->blk)) {
163         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
164     } else {
165         put_le16(p + 85, (1 << 14) | 1);
166     }
167     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
168     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
169     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
170     if (s->wwn) {
171         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
172     } else {
173         put_le16(p + 87, (1 << 14) | 0);
174     }
175     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
176     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
177     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
178     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
179     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
180     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 
182     if (dev && dev->conf.physical_block_size)
183         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
184     if (s->wwn) {
185         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
186         put_le16(p + 108, s->wwn >> 48);
187         put_le16(p + 109, s->wwn >> 32);
188         put_le16(p + 110, s->wwn >> 16);
189         put_le16(p + 111, s->wwn);
190     }
191     if (dev && dev->conf.discard_granularity) {
192         put_le16(p + 169, 1); /* TRIM support */
193     }
194 
195     ide_identify_size(s);
196     s->identify_set = 1;
197 
198 fill_buffer:
199     memcpy(s->io_buffer, p, sizeof(s->identify_data));
200 }
201 
202 static void ide_atapi_identify(IDEState *s)
203 {
204     uint16_t *p;
205 
206     p = (uint16_t *)s->identify_data;
207     if (s->identify_set) {
208         goto fill_buffer;
209     }
210     memset(p, 0, sizeof(s->identify_data));
211 
212     /* Removable CDROM, 50us response, 12 byte packets */
213     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
214     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
215     put_le16(p + 20, 3); /* buffer type */
216     put_le16(p + 21, 512); /* cache size in sectors */
217     put_le16(p + 22, 4); /* ecc bytes */
218     padstr((char *)(p + 23), s->version, 8); /* firmware version */
219     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
220     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
221 #ifdef USE_DMA_CDROM
222     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
223     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
224     put_le16(p + 62, 7);  /* single word dma0-2 supported */
225     put_le16(p + 63, 7);  /* mdma0-2 supported */
226 #else
227     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
228     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
229     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
230 #endif
231     put_le16(p + 64, 3); /* pio3-4 supported */
232     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
233     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
234     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
235     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 
237     put_le16(p + 71, 30); /* in ns */
238     put_le16(p + 72, 30); /* in ns */
239 
240     if (s->ncq_queues) {
241         put_le16(p + 75, s->ncq_queues - 1);
242         /* NCQ supported */
243         put_le16(p + 76, (1 << 8));
244     }
245 
246     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
247     if (s->wwn) {
248         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
249         put_le16(p + 87, (1 << 8)); /* WWN enabled */
250     }
251 
252 #ifdef USE_DMA_CDROM
253     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
254 #endif
255 
256     if (s->wwn) {
257         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
258         put_le16(p + 108, s->wwn >> 48);
259         put_le16(p + 109, s->wwn >> 32);
260         put_le16(p + 110, s->wwn >> 16);
261         put_le16(p + 111, s->wwn);
262     }
263 
264     s->identify_set = 1;
265 
266 fill_buffer:
267     memcpy(s->io_buffer, p, sizeof(s->identify_data));
268 }
269 
270 static void ide_cfata_identify_size(IDEState *s)
271 {
272     uint16_t *p = (uint16_t *)s->identify_data;
273     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
274     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
275     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
276     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
277 }
278 
279 static void ide_cfata_identify(IDEState *s)
280 {
281     uint16_t *p;
282     uint32_t cur_sec;
283 
284     p = (uint16_t *)s->identify_data;
285     if (s->identify_set) {
286         goto fill_buffer;
287     }
288     memset(p, 0, sizeof(s->identify_data));
289 
290     cur_sec = s->cylinders * s->heads * s->sectors;
291 
292     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
293     put_le16(p + 1, s->cylinders);		/* Default cylinders */
294     put_le16(p + 3, s->heads);			/* Default heads */
295     put_le16(p + 6, s->sectors);		/* Default sectors per track */
296     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
297     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
298     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
299     put_le16(p + 22, 0x0004);			/* ECC bytes */
300     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
301     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
302 #if MAX_MULT_SECTORS > 1
303     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
304 #else
305     put_le16(p + 47, 0x0000);
306 #endif
307     put_le16(p + 49, 0x0f00);			/* Capabilities */
308     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
309     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
310     put_le16(p + 53, 0x0003);			/* Translation params valid */
311     put_le16(p + 54, s->cylinders);		/* Current cylinders */
312     put_le16(p + 55, s->heads);			/* Current heads */
313     put_le16(p + 56, s->sectors);		/* Current sectors */
314     put_le16(p + 57, cur_sec);			/* Current capacity */
315     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
316     if (s->mult_sectors)			/* Multiple sector setting */
317         put_le16(p + 59, 0x100 | s->mult_sectors);
318     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
319     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
320     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
321     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
322     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
323     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
324     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
325     put_le16(p + 82, 0x400c);			/* Command Set supported */
326     put_le16(p + 83, 0x7068);			/* Command Set supported */
327     put_le16(p + 84, 0x4000);			/* Features supported */
328     put_le16(p + 85, 0x000c);			/* Command Set enabled */
329     put_le16(p + 86, 0x7044);			/* Command Set enabled */
330     put_le16(p + 87, 0x4000);			/* Features enabled */
331     put_le16(p + 91, 0x4060);			/* Current APM level */
332     put_le16(p + 129, 0x0002);			/* Current features option */
333     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
334     put_le16(p + 131, 0x0001);			/* Initial power mode */
335     put_le16(p + 132, 0x0000);			/* User signature */
336     put_le16(p + 160, 0x8100);			/* Power requirement */
337     put_le16(p + 161, 0x8001);			/* CF command set */
338 
339     ide_cfata_identify_size(s);
340     s->identify_set = 1;
341 
342 fill_buffer:
343     memcpy(s->io_buffer, p, sizeof(s->identify_data));
344 }
345 
346 static void ide_set_signature(IDEState *s)
347 {
348     s->select &= 0xf0; /* clear head */
349     /* put signature */
350     s->nsector = 1;
351     s->sector = 1;
352     if (s->drive_kind == IDE_CD) {
353         s->lcyl = 0x14;
354         s->hcyl = 0xeb;
355     } else if (s->blk) {
356         s->lcyl = 0;
357         s->hcyl = 0;
358     } else {
359         s->lcyl = 0xff;
360         s->hcyl = 0xff;
361     }
362 }
363 
364 typedef struct TrimAIOCB {
365     BlockAIOCB common;
366     BlockBackend *blk;
367     QEMUBH *bh;
368     int ret;
369     QEMUIOVector *qiov;
370     BlockAIOCB *aiocb;
371     int i, j;
372 } TrimAIOCB;
373 
374 static void trim_aio_cancel(BlockAIOCB *acb)
375 {
376     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 
378     /* Exit the loop so ide_issue_trim_cb will not continue  */
379     iocb->j = iocb->qiov->niov - 1;
380     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 
382     iocb->ret = -ECANCELED;
383 
384     if (iocb->aiocb) {
385         blk_aio_cancel_async(iocb->aiocb);
386         iocb->aiocb = NULL;
387     }
388 }
389 
390 static const AIOCBInfo trim_aiocb_info = {
391     .aiocb_size         = sizeof(TrimAIOCB),
392     .cancel_async       = trim_aio_cancel,
393 };
394 
395 static void ide_trim_bh_cb(void *opaque)
396 {
397     TrimAIOCB *iocb = opaque;
398 
399     iocb->common.cb(iocb->common.opaque, iocb->ret);
400 
401     qemu_bh_delete(iocb->bh);
402     iocb->bh = NULL;
403     qemu_aio_unref(iocb);
404 }
405 
406 static void ide_issue_trim_cb(void *opaque, int ret)
407 {
408     TrimAIOCB *iocb = opaque;
409     if (ret >= 0) {
410         while (iocb->j < iocb->qiov->niov) {
411             int j = iocb->j;
412             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
413                 int i = iocb->i;
414                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 
416                 /* 6-byte LBA + 2-byte range per entry */
417                 uint64_t entry = le64_to_cpu(buffer[i]);
418                 uint64_t sector = entry & 0x0000ffffffffffffULL;
419                 uint16_t count = entry >> 48;
420 
421                 if (count == 0) {
422                     continue;
423                 }
424 
425                 /* Got an entry! Submit and exit.  */
426                 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
427                                               ide_issue_trim_cb, opaque);
428                 return;
429             }
430 
431             iocb->j++;
432             iocb->i = -1;
433         }
434     } else {
435         iocb->ret = ret;
436     }
437 
438     iocb->aiocb = NULL;
439     if (iocb->bh) {
440         qemu_bh_schedule(iocb->bh);
441     }
442 }
443 
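/*
 * The DATA SET MANAGEMENT (TRIM) payload walked by ide_issue_trim_cb()
 * above is an array of little-endian 64-bit entries: bits 0-47 hold the
 * starting LBA and bits 48-63 the number of sectors to discard, e.g. the
 * entry 0x0010000000001000 requests 0x10 (16) sectors starting at LBA
 * 0x1000.  Entries with a zero sector count are skipped.
 */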
444 BlockAIOCB *ide_issue_trim(
445         int64_t offset, QEMUIOVector *qiov,
446         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
447 {
448     BlockBackend *blk = opaque;
449     TrimAIOCB *iocb;
450 
451     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
452     iocb->blk = blk;
453     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
454     iocb->ret = 0;
455     iocb->qiov = qiov;
456     iocb->i = -1;
457     iocb->j = 0;
458     ide_issue_trim_cb(iocb, 0);
459     return &iocb->common;
460 }
461 
462 void ide_abort_command(IDEState *s)
463 {
464     ide_transfer_stop(s);
465     s->status = READY_STAT | ERR_STAT;
466     s->error = ABRT_ERR;
467 }
468 
469 static void ide_set_retry(IDEState *s)
470 {
471     s->bus->retry_unit = s->unit;
472     s->bus->retry_sector_num = ide_get_sector(s);
473     s->bus->retry_nsector = s->nsector;
474 }
475 
476 static void ide_clear_retry(IDEState *s)
477 {
478     s->bus->retry_unit = -1;
479     s->bus->retry_sector_num = 0;
480     s->bus->retry_nsector = 0;
481 }
482 
483 /* prepare data transfer and tell what to do after */
484 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
485                         EndTransferFunc *end_transfer_func)
486 {
487     s->end_transfer_func = end_transfer_func;
488     s->data_ptr = buf;
489     s->data_end = buf + size;
490     ide_set_retry(s);
491     if (!(s->status & ERR_STAT)) {
492         s->status |= DRQ_STAT;
493     }
494     if (s->bus->dma->ops->start_transfer) {
495         s->bus->dma->ops->start_transfer(s->bus->dma);
496     }
497 }
498 
499 static void ide_cmd_done(IDEState *s)
500 {
501     if (s->bus->dma->ops->cmd_done) {
502         s->bus->dma->ops->cmd_done(s->bus->dma);
503     }
504 }
505 
506 static void ide_transfer_halt(IDEState *s,
507                               void(*end_transfer_func)(IDEState *),
508                               bool notify)
509 {
510     s->end_transfer_func = end_transfer_func;
511     s->data_ptr = s->io_buffer;
512     s->data_end = s->io_buffer;
513     s->status &= ~DRQ_STAT;
514     if (notify) {
515         ide_cmd_done(s);
516     }
517 }
518 
519 void ide_transfer_stop(IDEState *s)
520 {
521     ide_transfer_halt(s, ide_transfer_stop, true);
522 }
523 
524 static void ide_transfer_cancel(IDEState *s)
525 {
526     ide_transfer_halt(s, ide_transfer_cancel, false);
527 }
528 
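/*
 * ide_get_sector()/ide_set_sector() translate between the task file
 * registers and an absolute sector number.  In LBA28 mode the address is
 * select[3:0]:hcyl:lcyl:sector, in LBA48 mode the HOB registers provide
 * the upper three bytes, and in CHS mode the classic formula applies:
 *   lba = (cyl * heads + head) * sectors_per_track + (sector - 1)
 */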
529 int64_t ide_get_sector(IDEState *s)
530 {
531     int64_t sector_num;
532     if (s->select & 0x40) {
533         /* lba */
534 	if (!s->lba48) {
535 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
536 		(s->lcyl << 8) | s->sector;
537 	} else {
538 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
539 		((int64_t) s->hob_lcyl << 32) |
540 		((int64_t) s->hob_sector << 24) |
541 		((int64_t) s->hcyl << 16) |
542 		((int64_t) s->lcyl << 8) | s->sector;
543 	}
544     } else {
545         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
546             (s->select & 0x0f) * s->sectors + (s->sector - 1);
547     }
548     return sector_num;
549 }
550 
551 void ide_set_sector(IDEState *s, int64_t sector_num)
552 {
553     unsigned int cyl, r;
554     if (s->select & 0x40) {
555 	if (!s->lba48) {
556             s->select = (s->select & 0xf0) | (sector_num >> 24);
557             s->hcyl = (sector_num >> 16);
558             s->lcyl = (sector_num >> 8);
559             s->sector = (sector_num);
560 	} else {
561 	    s->sector = sector_num;
562 	    s->lcyl = sector_num >> 8;
563 	    s->hcyl = sector_num >> 16;
564 	    s->hob_sector = sector_num >> 24;
565 	    s->hob_lcyl = sector_num >> 32;
566 	    s->hob_hcyl = sector_num >> 40;
567 	}
568     } else {
569         cyl = sector_num / (s->heads * s->sectors);
570         r = sector_num % (s->heads * s->sectors);
571         s->hcyl = cyl >> 8;
572         s->lcyl = cyl;
573         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
574         s->sector = (r % s->sectors) + 1;
575     }
576 }
577 
578 static void ide_rw_error(IDEState *s) {
579     ide_abort_command(s);
580     ide_set_irq(s->bus);
581 }
582 
583 static bool ide_sect_range_ok(IDEState *s,
584                               uint64_t sector, uint64_t nb_sectors)
585 {
586     uint64_t total_sectors;
587 
588     blk_get_geometry(s->blk, &total_sectors);
589     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
590         return false;
591     }
592     return true;
593 }
594 
595 static void ide_buffered_readv_cb(void *opaque, int ret)
596 {
597     IDEBufferedRequest *req = opaque;
598     if (!req->orphaned) {
599         if (!ret) {
600             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
601                                 req->original_qiov->size);
602         }
603         req->original_cb(req->original_opaque, ret);
604     }
605     QLIST_REMOVE(req, list);
606     qemu_vfree(req->iov.iov_base);
607     g_free(req);
608 }
609 
610 #define MAX_BUFFERED_REQS 16
611 
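/*
 * Read into a private bounce buffer instead of directly into the caller's
 * iovec.  On completion the data is copied into the original iovec and the
 * original callback invoked, unless the request has been orphaned by
 * ide_cancel_dma_sync(), in which case the bounce buffer is simply freed
 * and the caller's buffer is never touched.  If too many buffered reads
 * are already in flight, the request is failed with -EIO.
 */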
612 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
613                                QEMUIOVector *iov, int nb_sectors,
614                                BlockCompletionFunc *cb, void *opaque)
615 {
616     BlockAIOCB *aioreq;
617     IDEBufferedRequest *req;
618     int c = 0;
619 
620     QLIST_FOREACH(req, &s->buffered_requests, list) {
621         c++;
622     }
623     if (c > MAX_BUFFERED_REQS) {
624         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
625     }
626 
627     req = g_new0(IDEBufferedRequest, 1);
628     req->original_qiov = iov;
629     req->original_cb = cb;
630     req->original_opaque = opaque;
631     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
632     req->iov.iov_len = iov->size;
633     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
634 
635     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
636                             &req->qiov, 0, ide_buffered_readv_cb, req);
637 
638     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
639     return aioreq;
640 }
641 
642 /**
643  * Cancel all pending DMA requests.
644  * Any buffered DMA requests are instantly canceled,
645  * but any pending unbuffered DMA requests must be waited on.
646  */
647 void ide_cancel_dma_sync(IDEState *s)
648 {
649     IDEBufferedRequest *req;
650 
651     /* First invoke the callbacks of all buffered requests
652      * and flag those requests as orphaned. Ideally there
653      * are no unbuffered requests (Scatter Gather DMA requests or
654      * write requests) pending and we can avoid draining. */
655     QLIST_FOREACH(req, &s->buffered_requests, list) {
656         if (!req->orphaned) {
657 #ifdef DEBUG_IDE
658             printf("%s: invoking cb %p of buffered request %p with"
659                    " -ECANCELED\n", __func__, req->original_cb, req);
660 #endif
661             req->original_cb(req->original_opaque, -ECANCELED);
662         }
663         req->orphaned = true;
664     }
665 
666     /*
667      * We can't cancel Scatter Gather DMA in the middle of the
668      * operation or a partial (not full) DMA transfer would reach
669      * the storage, so we wait for completion instead (we behave
670      * as if the DMA had completed by the time the guest tried
671      * to cancel it with bmdma_cmd_writeb with BM_CMD_START not
672      * set).
673      *
674      * In the future we'll be able to safely cancel the I/O if the
675      * whole DMA operation is submitted to disk with a single
676      * aio operation with preadv/pwritev.
677      */
678     if (s->bus->dma->aiocb) {
679 #ifdef DEBUG_IDE
680         printf("%s: draining all remaining requests\n", __func__);
681 #endif
682         blk_drain(s->blk);
683         assert(s->bus->dma->aiocb == NULL);
684     }
685 }
686 
687 static void ide_sector_read(IDEState *s);
688 
689 static void ide_sector_read_cb(void *opaque, int ret)
690 {
691     IDEState *s = opaque;
692     int n;
693 
694     s->pio_aiocb = NULL;
695     s->status &= ~BUSY_STAT;
696 
697     if (ret == -ECANCELED) {
698         return;
699     }
700     if (ret != 0) {
701         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
702                                 IDE_RETRY_READ)) {
703             return;
704         }
705     }
706 
707     block_acct_done(blk_get_stats(s->blk), &s->acct);
708 
709     n = s->nsector;
710     if (n > s->req_nb_sectors) {
711         n = s->req_nb_sectors;
712     }
713 
714     ide_set_sector(s, ide_get_sector(s) + n);
715     s->nsector -= n;
716     /* Allow the guest to read the io_buffer */
717     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
718     ide_set_irq(s->bus);
719 }
720 
721 static void ide_sector_read(IDEState *s)
722 {
723     int64_t sector_num;
724     int n;
725 
726     s->status = READY_STAT | SEEK_STAT;
727     s->error = 0; /* not needed by IDE spec, but needed by Windows */
728     sector_num = ide_get_sector(s);
729     n = s->nsector;
730 
731     if (n == 0) {
732         ide_transfer_stop(s);
733         return;
734     }
735 
736     s->status |= BUSY_STAT;
737 
738     if (n > s->req_nb_sectors) {
739         n = s->req_nb_sectors;
740     }
741 
742 #if defined(DEBUG_IDE)
743     printf("sector=%" PRId64 "\n", sector_num);
744 #endif
745 
746     if (!ide_sect_range_ok(s, sector_num, n)) {
747         ide_rw_error(s);
748         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
749         return;
750     }
751 
752     s->iov.iov_base = s->io_buffer;
753     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
754     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
755 
756     block_acct_start(blk_get_stats(s->blk), &s->acct,
757                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
758     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
759                                       ide_sector_read_cb, s);
760 }
761 
762 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
763 {
764     if (s->bus->dma->ops->commit_buf) {
765         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
766     }
767     s->io_buffer_offset += tx_bytes;
768     qemu_sglist_destroy(&s->sg);
769 }
770 
771 void ide_set_inactive(IDEState *s, bool more)
772 {
773     s->bus->dma->aiocb = NULL;
774     ide_clear_retry(s);
775     if (s->bus->dma->ops->set_inactive) {
776         s->bus->dma->ops->set_inactive(s->bus->dma, more);
777     }
778     ide_cmd_done(s);
779 }
780 
781 void ide_dma_error(IDEState *s)
782 {
783     dma_buf_commit(s, 0);
784     ide_abort_command(s);
785     ide_set_inactive(s, false);
786     ide_set_irq(s->bus);
787 }
788 
789 int ide_handle_rw_error(IDEState *s, int error, int op)
790 {
791     bool is_read = (op & IDE_RETRY_READ) != 0;
792     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
793 
794     if (action == BLOCK_ERROR_ACTION_STOP) {
795         assert(s->bus->retry_unit == s->unit);
796         s->bus->error_status = op;
797     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
798         block_acct_failed(blk_get_stats(s->blk), &s->acct);
799         if (IS_IDE_RETRY_DMA(op)) {
800             ide_dma_error(s);
801         } else if (IS_IDE_RETRY_ATAPI(op)) {
802             ide_atapi_io_error(s, -error);
803         } else {
804             ide_rw_error(s);
805         }
806     }
807     blk_error_action(s->blk, action, is_read, error);
808     return action != BLOCK_ERROR_ACTION_IGNORE;
809 }
810 
811 static void ide_dma_cb(void *opaque, int ret)
812 {
813     IDEState *s = opaque;
814     int n;
815     int64_t sector_num;
816     uint64_t offset;
817     bool stay_active = false;
818 
819     if (ret == -ECANCELED) {
820         return;
821     }
822     if (ret < 0) {
823         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
824             return;
825         }
826     }
827 
828     n = s->io_buffer_size >> 9;
829     if (n > s->nsector) {
830         /* The PRDs were longer than needed for this request. Shorten them so
831          * we don't get a negative remainder. The Active bit must remain set
832          * after the request completes. */
833         n = s->nsector;
834         stay_active = true;
835     }
836 
837     sector_num = ide_get_sector(s);
838     if (n > 0) {
839         assert(n * 512 == s->sg.size);
840         dma_buf_commit(s, s->sg.size);
841         sector_num += n;
842         ide_set_sector(s, sector_num);
843         s->nsector -= n;
844     }
845 
846     /* end of transfer ? */
847     if (s->nsector == 0) {
848         s->status = READY_STAT | SEEK_STAT;
849         ide_set_irq(s->bus);
850         goto eot;
851     }
852 
853     /* launch next transfer */
854     n = s->nsector;
855     s->io_buffer_index = 0;
856     s->io_buffer_size = n * 512;
857     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
858         /* The PRDs were too short. Reset the Active bit, but don't raise an
859          * interrupt. */
860         s->status = READY_STAT | SEEK_STAT;
861         dma_buf_commit(s, 0);
862         goto eot;
863     }
864 
865 #ifdef DEBUG_AIO
866     printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
867            sector_num, n, s->dma_cmd);
868 #endif
869 
870     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
871         !ide_sect_range_ok(s, sector_num, n)) {
872         ide_dma_error(s);
873         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
874         return;
875     }
876 
877     offset = sector_num << BDRV_SECTOR_BITS;
878     switch (s->dma_cmd) {
879     case IDE_DMA_READ:
880         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
881                                           ide_dma_cb, s);
882         break;
883     case IDE_DMA_WRITE:
884         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
885                                            ide_dma_cb, s);
886         break;
887     case IDE_DMA_TRIM:
888         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
889                                         &s->sg, offset,
890                                         ide_issue_trim, s->blk, ide_dma_cb, s,
891                                         DMA_DIRECTION_TO_DEVICE);
892         break;
893     default:
894         abort();
895     }
896     return;
897 
898 eot:
899     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
900         block_acct_done(blk_get_stats(s->blk), &s->acct);
901     }
902     ide_set_inactive(s, stay_active);
903 }
904 
905 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
906 {
907     s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
908     s->io_buffer_size = 0;
909     s->dma_cmd = dma_cmd;
910 
911     switch (dma_cmd) {
912     case IDE_DMA_READ:
913         block_acct_start(blk_get_stats(s->blk), &s->acct,
914                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
915         break;
916     case IDE_DMA_WRITE:
917         block_acct_start(blk_get_stats(s->blk), &s->acct,
918                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
919         break;
920     default:
921         break;
922     }
923 
924     ide_start_dma(s, ide_dma_cb);
925 }
926 
927 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
928 {
929     s->io_buffer_index = 0;
930     ide_set_retry(s);
931     if (s->bus->dma->ops->start_dma) {
932         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
933     }
934 }
935 
936 static void ide_sector_write(IDEState *s);
937 
938 static void ide_sector_write_timer_cb(void *opaque)
939 {
940     IDEState *s = opaque;
941     ide_set_irq(s->bus);
942 }
943 
944 static void ide_sector_write_cb(void *opaque, int ret)
945 {
946     IDEState *s = opaque;
947     int n;
948 
949     if (ret == -ECANCELED) {
950         return;
951     }
952 
953     s->pio_aiocb = NULL;
954     s->status &= ~BUSY_STAT;
955 
956     if (ret != 0) {
957         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
958             return;
959         }
960     }
961 
962     block_acct_done(blk_get_stats(s->blk), &s->acct);
963 
964     n = s->nsector;
965     if (n > s->req_nb_sectors) {
966         n = s->req_nb_sectors;
967     }
968     s->nsector -= n;
969 
970     ide_set_sector(s, ide_get_sector(s) + n);
971     if (s->nsector == 0) {
972         /* no more sectors to write */
973         ide_transfer_stop(s);
974     } else {
975         int n1 = s->nsector;
976         if (n1 > s->req_nb_sectors) {
977             n1 = s->req_nb_sectors;
978         }
979         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
980                            ide_sector_write);
981     }
982 
983     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
984         /* It seems there is a bug in the Windows 2000 installer HDD
985            IDE driver which fills the disk with empty logs when the
986            IDE write IRQ comes too early. This hack tries to correct
987            that at the expense of slower write performance. Use this
988            option _only_ to install Windows 2000. You must disable it
989            for normal use. */
990         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
991                   (NANOSECONDS_PER_SECOND / 1000));
992     } else {
993         ide_set_irq(s->bus);
994     }
995 }
996 
997 static void ide_sector_write(IDEState *s)
998 {
999     int64_t sector_num;
1000     int n;
1001 
1002     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1003     sector_num = ide_get_sector(s);
1004 #if defined(DEBUG_IDE)
1005     printf("sector=%" PRId64 "\n", sector_num);
1006 #endif
1007     n = s->nsector;
1008     if (n > s->req_nb_sectors) {
1009         n = s->req_nb_sectors;
1010     }
1011 
1012     if (!ide_sect_range_ok(s, sector_num, n)) {
1013         ide_rw_error(s);
1014         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1015         return;
1016     }
1017 
1018     s->iov.iov_base = s->io_buffer;
1019     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1020     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1021 
1022     block_acct_start(blk_get_stats(s->blk), &s->acct,
1023                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1024     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1025                                    &s->qiov, 0, ide_sector_write_cb, s);
1026 }
1027 
1028 static void ide_flush_cb(void *opaque, int ret)
1029 {
1030     IDEState *s = opaque;
1031 
1032     s->pio_aiocb = NULL;
1033 
1034     if (ret == -ECANCELED) {
1035         return;
1036     }
1037     if (ret < 0) {
1038         /* XXX: What sector number to set here? */
1039         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1040             return;
1041         }
1042     }
1043 
1044     if (s->blk) {
1045         block_acct_done(blk_get_stats(s->blk), &s->acct);
1046     }
1047     s->status = READY_STAT | SEEK_STAT;
1048     ide_cmd_done(s);
1049     ide_set_irq(s->bus);
1050 }
1051 
1052 static void ide_flush_cache(IDEState *s)
1053 {
1054     if (s->blk == NULL) {
1055         ide_flush_cb(s, 0);
1056         return;
1057     }
1058 
1059     s->status |= BUSY_STAT;
1060     ide_set_retry(s);
1061     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1062     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1063 }
1064 
1065 static void ide_cfata_metadata_inquiry(IDEState *s)
1066 {
1067     uint16_t *p;
1068     uint32_t spd;
1069 
1070     p = (uint16_t *) s->io_buffer;
1071     memset(p, 0, 0x200);
1072     spd = ((s->mdata_size - 1) >> 9) + 1;
1073 
1074     put_le16(p + 0, 0x0001);			/* Data format revision */
1075     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1076     put_le16(p + 2, s->media_changed);		/* Media status */
1077     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1078     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1079     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1080     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1081 }
1082 
1083 static void ide_cfata_metadata_read(IDEState *s)
1084 {
1085     uint16_t *p;
1086 
1087     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1088         s->status = ERR_STAT;
1089         s->error = ABRT_ERR;
1090         return;
1091     }
1092 
1093     p = (uint16_t *) s->io_buffer;
1094     memset(p, 0, 0x200);
1095 
1096     put_le16(p + 0, s->media_changed);		/* Media status */
1097     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1098                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1099                                     s->nsector << 9), 0x200 - 2));
1100 }
1101 
1102 static void ide_cfata_metadata_write(IDEState *s)
1103 {
1104     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1105         s->status = ERR_STAT;
1106         s->error = ABRT_ERR;
1107         return;
1108     }
1109 
1110     s->media_changed = 0;
1111 
1112     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1113                     s->io_buffer + 2,
1114                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1115                                     s->nsector << 9), 0x200 - 2));
1116 }
1117 
1118 /* called when the inserted state of the media has changed */
1119 static void ide_cd_change_cb(void *opaque, bool load)
1120 {
1121     IDEState *s = opaque;
1122     uint64_t nb_sectors;
1123 
1124     s->tray_open = !load;
1125     blk_get_geometry(s->blk, &nb_sectors);
1126     s->nb_sectors = nb_sectors;
1127 
1128     /*
1129      * First indicate to the guest that a CD has been removed.  That's
1130      * done on the next command the guest sends us.
1131      *
1132      * Then we set UNIT_ATTENTION, by which the guest will
1133      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1134      */
1135     s->cdrom_changed = 1;
1136     s->events.new_media = true;
1137     s->events.eject_request = false;
1138     ide_set_irq(s->bus);
1139 }
1140 
1141 static void ide_cd_eject_request_cb(void *opaque, bool force)
1142 {
1143     IDEState *s = opaque;
1144 
1145     s->events.eject_request = true;
1146     if (force) {
1147         s->tray_locked = false;
1148     }
1149     ide_set_irq(s->bus);
1150 }
1151 
1152 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1153 {
1154     s->lba48 = lba48;
1155 
1156     /* handle the 'magic' 0 nsector count conversion here. To avoid
1157      * fiddling with the rest of the read logic, we just store the
1158      * full sector count in ->nsector and ignore ->hob_nsector from now on
1159      */
1160     if (!s->lba48) {
1161 	if (!s->nsector)
1162 	    s->nsector = 256;
1163     } else {
1164 	if (!s->nsector && !s->hob_nsector)
1165 	    s->nsector = 65536;
1166 	else {
1167 	    int lo = s->nsector;
1168 	    int hi = s->hob_nsector;
1169 
1170 	    s->nsector = (hi << 8) | lo;
1171 	}
1172     }
1173 }
1174 
1175 static void ide_clear_hob(IDEBus *bus)
1176 {
1177     /* any write clears HOB high bit of device control register */
1178     bus->ifs[0].select &= ~(1 << 7);
1179     bus->ifs[1].select &= ~(1 << 7);
1180 }
1181 
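/*
 * Write to one of the command block registers.  The low three address bits
 * select the ATA task file register: 1=features, 2=sector count, 3=sector
 * number/LBA low, 4=cylinder low/LBA mid, 5=cylinder high/LBA high,
 * 6=device/head select, 7=command.  Writes to registers 1-6 are latched
 * into both drives on the bus; a write to register 7 issues the command to
 * the currently selected drive, and writes to the data register (0) are
 * ignored here.
 */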
1182 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1183 {
1184     IDEBus *bus = opaque;
1185 
1186 #ifdef DEBUG_IDE
1187     printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1188 #endif
1189 
1190     addr &= 7;
1191 
1192     /* ignore writes to command block while busy with previous command */
1193     if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1194         return;
1195 
1196     switch(addr) {
1197     case 0:
1198         break;
1199     case 1:
1200 	ide_clear_hob(bus);
1201         /* NOTE: data is written to the two drives */
1202 	bus->ifs[0].hob_feature = bus->ifs[0].feature;
1203 	bus->ifs[1].hob_feature = bus->ifs[1].feature;
1204         bus->ifs[0].feature = val;
1205         bus->ifs[1].feature = val;
1206         break;
1207     case 2:
1208 	ide_clear_hob(bus);
1209 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1210 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1211         bus->ifs[0].nsector = val;
1212         bus->ifs[1].nsector = val;
1213         break;
1214     case 3:
1215 	ide_clear_hob(bus);
1216 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1217 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1218         bus->ifs[0].sector = val;
1219         bus->ifs[1].sector = val;
1220         break;
1221     case 4:
1222 	ide_clear_hob(bus);
1223 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1224 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1225         bus->ifs[0].lcyl = val;
1226         bus->ifs[1].lcyl = val;
1227         break;
1228     case 5:
1229 	ide_clear_hob(bus);
1230 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1231 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1232         bus->ifs[0].hcyl = val;
1233         bus->ifs[1].hcyl = val;
1234         break;
1235     case 6:
1236 	/* FIXME: HOB readback uses bit 7 */
1237         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1238         bus->ifs[1].select = (val | 0x10) | 0xa0;
1239         /* select drive */
1240         bus->unit = (val >> 4) & 1;
1241         break;
1242     default:
1243     case 7:
1244         /* command */
1245         ide_exec_cmd(bus, val);
1246         break;
1247     }
1248 }
1249 
1250 static void ide_reset(IDEState *s)
1251 {
1252 #ifdef DEBUG_IDE
1253     printf("ide: reset\n");
1254 #endif
1255 
1256     if (s->pio_aiocb) {
1257         blk_aio_cancel(s->pio_aiocb);
1258         s->pio_aiocb = NULL;
1259     }
1260 
1261     if (s->drive_kind == IDE_CFATA)
1262         s->mult_sectors = 0;
1263     else
1264         s->mult_sectors = MAX_MULT_SECTORS;
1265     /* ide regs */
1266     s->feature = 0;
1267     s->error = 0;
1268     s->nsector = 0;
1269     s->sector = 0;
1270     s->lcyl = 0;
1271     s->hcyl = 0;
1272 
1273     /* lba48 */
1274     s->hob_feature = 0;
1275     s->hob_sector = 0;
1276     s->hob_nsector = 0;
1277     s->hob_lcyl = 0;
1278     s->hob_hcyl = 0;
1279 
1280     s->select = 0xa0;
1281     s->status = READY_STAT | SEEK_STAT;
1282 
1283     s->lba48 = 0;
1284 
1285     /* ATAPI specific */
1286     s->sense_key = 0;
1287     s->asc = 0;
1288     s->cdrom_changed = 0;
1289     s->packet_transfer_size = 0;
1290     s->elementary_transfer_size = 0;
1291     s->io_buffer_index = 0;
1292     s->cd_sector_size = 0;
1293     s->atapi_dma = 0;
1294     s->tray_locked = 0;
1295     s->tray_open = 0;
1296     /* ATA DMA state */
1297     s->io_buffer_size = 0;
1298     s->req_nb_sectors = 0;
1299 
1300     ide_set_signature(s);
1301     /* init the transfer handler so that 0xffff is returned on data
1302        accesses */
1303     s->end_transfer_func = ide_dummy_transfer_stop;
1304     ide_dummy_transfer_stop(s);
1305     s->media_changed = 0;
1306 }
1307 
1308 static bool cmd_nop(IDEState *s, uint8_t cmd)
1309 {
1310     return true;
1311 }
1312 
1313 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1314 {
1315     /* Halt PIO (in the DRQ phase), then DMA */
1316     ide_transfer_cancel(s);
1317     ide_cancel_dma_sync(s);
1318 
1319     /* Reset any PIO commands, reset signature, etc */
1320     ide_reset(s);
1321 
1322     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1323      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1324     s->status = 0x00;
1325 
1326     /* Do not overwrite status register */
1327     return false;
1328 }
1329 
1330 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1331 {
1332     switch (s->feature) {
1333     case DSM_TRIM:
1334         if (s->blk) {
1335             ide_sector_start_dma(s, IDE_DMA_TRIM);
1336             return false;
1337         }
1338         break;
1339     }
1340 
1341     ide_abort_command(s);
1342     return true;
1343 }
1344 
1345 static bool cmd_identify(IDEState *s, uint8_t cmd)
1346 {
1347     if (s->blk && s->drive_kind != IDE_CD) {
1348         if (s->drive_kind != IDE_CFATA) {
1349             ide_identify(s);
1350         } else {
1351             ide_cfata_identify(s);
1352         }
1353         s->status = READY_STAT | SEEK_STAT;
1354         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1355         ide_set_irq(s->bus);
1356         return false;
1357     } else {
1358         if (s->drive_kind == IDE_CD) {
1359             ide_set_signature(s);
1360         }
1361         ide_abort_command(s);
1362     }
1363 
1364     return true;
1365 }
1366 
1367 static bool cmd_verify(IDEState *s, uint8_t cmd)
1368 {
1369     bool lba48 = (cmd == WIN_VERIFY_EXT);
1370 
1371     /* do sector number check ? */
1372     ide_cmd_lba48_transform(s, lba48);
1373 
1374     return true;
1375 }
1376 
1377 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1378 {
1379     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1380         /* Disable Read and Write Multiple */
1381         s->mult_sectors = 0;
1382     } else if ((s->nsector & 0xff) != 0 &&
1383         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1384          (s->nsector & (s->nsector - 1)) != 0)) {
1385         ide_abort_command(s);
1386     } else {
1387         s->mult_sectors = s->nsector & 0xff;
1388     }
1389 
1390     return true;
1391 }
1392 
1393 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1394 {
1395     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1396 
1397     if (!s->blk || !s->mult_sectors) {
1398         ide_abort_command(s);
1399         return true;
1400     }
1401 
1402     ide_cmd_lba48_transform(s, lba48);
1403     s->req_nb_sectors = s->mult_sectors;
1404     ide_sector_read(s);
1405     return false;
1406 }
1407 
1408 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1409 {
1410     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1411     int n;
1412 
1413     if (!s->blk || !s->mult_sectors) {
1414         ide_abort_command(s);
1415         return true;
1416     }
1417 
1418     ide_cmd_lba48_transform(s, lba48);
1419 
1420     s->req_nb_sectors = s->mult_sectors;
1421     n = MIN(s->nsector, s->req_nb_sectors);
1422 
1423     s->status = SEEK_STAT | READY_STAT;
1424     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1425 
1426     s->media_changed = 1;
1427 
1428     return false;
1429 }
1430 
1431 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1432 {
1433     bool lba48 = (cmd == WIN_READ_EXT);
1434 
1435     if (s->drive_kind == IDE_CD) {
1436         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1437         ide_abort_command(s);
1438         return true;
1439     }
1440 
1441     if (!s->blk) {
1442         ide_abort_command(s);
1443         return true;
1444     }
1445 
1446     ide_cmd_lba48_transform(s, lba48);
1447     s->req_nb_sectors = 1;
1448     ide_sector_read(s);
1449 
1450     return false;
1451 }
1452 
1453 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1454 {
1455     bool lba48 = (cmd == WIN_WRITE_EXT);
1456 
1457     if (!s->blk) {
1458         ide_abort_command(s);
1459         return true;
1460     }
1461 
1462     ide_cmd_lba48_transform(s, lba48);
1463 
1464     s->req_nb_sectors = 1;
1465     s->status = SEEK_STAT | READY_STAT;
1466     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1467 
1468     s->media_changed = 1;
1469 
1470     return false;
1471 }
1472 
1473 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1474 {
1475     bool lba48 = (cmd == WIN_READDMA_EXT);
1476 
1477     if (!s->blk) {
1478         ide_abort_command(s);
1479         return true;
1480     }
1481 
1482     ide_cmd_lba48_transform(s, lba48);
1483     ide_sector_start_dma(s, IDE_DMA_READ);
1484 
1485     return false;
1486 }
1487 
1488 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1489 {
1490     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1491 
1492     if (!s->blk) {
1493         ide_abort_command(s);
1494         return true;
1495     }
1496 
1497     ide_cmd_lba48_transform(s, lba48);
1498     ide_sector_start_dma(s, IDE_DMA_WRITE);
1499 
1500     s->media_changed = 1;
1501 
1502     return false;
1503 }
1504 
1505 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1506 {
1507     ide_flush_cache(s);
1508     return false;
1509 }
1510 
1511 static bool cmd_seek(IDEState *s, uint8_t cmd)
1512 {
1513     /* XXX: Check that seek is within bounds */
1514     return true;
1515 }
1516 
1517 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1518 {
1519     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1520 
1521     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1522     if (s->nb_sectors == 0) {
1523         ide_abort_command(s);
1524         return true;
1525     }
1526 
1527     ide_cmd_lba48_transform(s, lba48);
1528     ide_set_sector(s, s->nb_sectors - 1);
1529 
1530     return true;
1531 }
1532 
1533 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1534 {
1535     s->nsector = 0xff; /* device active or idle */
1536     return true;
1537 }
1538 
1539 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1540 {
1541     uint16_t *identify_data;
1542 
1543     if (!s->blk) {
1544         ide_abort_command(s);
1545         return true;
1546     }
1547 
1548     /* XXX: valid for CDROM ? */
1549     switch (s->feature) {
1550     case 0x02: /* write cache enable */
1551         blk_set_enable_write_cache(s->blk, true);
1552         identify_data = (uint16_t *)s->identify_data;
1553         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1554         return true;
1555     case 0x82: /* write cache disable */
1556         blk_set_enable_write_cache(s->blk, false);
1557         identify_data = (uint16_t *)s->identify_data;
1558         put_le16(identify_data + 85, (1 << 14) | 1);
1559         ide_flush_cache(s);
1560         return false;
1561     case 0xcc: /* reverting to power-on defaults enable */
1562     case 0x66: /* reverting to power-on defaults disable */
1563     case 0xaa: /* read look-ahead enable */
1564     case 0x55: /* read look-ahead disable */
1565     case 0x05: /* set advanced power management mode */
1566     case 0x85: /* disable advanced power management mode */
1567     case 0x69: /* NOP */
1568     case 0x67: /* NOP */
1569     case 0x96: /* NOP */
1570     case 0x9a: /* NOP */
1571     case 0x42: /* enable Automatic Acoustic Mode */
1572     case 0xc2: /* disable Automatic Acoustic Mode */
1573         return true;
1574     case 0x03: /* set transfer mode */
1575         {
1576             uint8_t val = s->nsector & 0x07;
1577             identify_data = (uint16_t *)s->identify_data;
1578 
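            /*
             * For the "set transfer mode" subcommand the sector count
             * register encodes the requested mode: bits 7:3 select the
             * transfer type (0/1 = PIO, 2 = single word DMA, 4 = multiword
             * DMA, 8 = Ultra DMA) and bits 2:0 the mode number, which is
             * mirrored back into IDENTIFY words 62/63/88 below.
             */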
1579             switch (s->nsector >> 3) {
1580             case 0x00: /* pio default */
1581             case 0x01: /* pio mode */
1582                 put_le16(identify_data + 62, 0x07);
1583                 put_le16(identify_data + 63, 0x07);
1584                 put_le16(identify_data + 88, 0x3f);
1585                 break;
1586             case 0x02: /* single word dma mode */
1587                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1588                 put_le16(identify_data + 63, 0x07);
1589                 put_le16(identify_data + 88, 0x3f);
1590                 break;
1591             case 0x04: /* mdma mode */
1592                 put_le16(identify_data + 62, 0x07);
1593                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1594                 put_le16(identify_data + 88, 0x3f);
1595                 break;
1596             case 0x08: /* udma mode */
1597                 put_le16(identify_data + 62, 0x07);
1598                 put_le16(identify_data + 63, 0x07);
1599                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1600                 break;
1601             default:
1602                 goto abort_cmd;
1603             }
1604             return true;
1605         }
1606     }
1607 
1608 abort_cmd:
1609     ide_abort_command(s);
1610     return true;
1611 }
1612 
1613 
1614 /*** ATAPI commands ***/
1615 
1616 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1617 {
1618     ide_atapi_identify(s);
1619     s->status = READY_STAT | SEEK_STAT;
1620     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1621     ide_set_irq(s->bus);
1622     return false;
1623 }
1624 
1625 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1626 {
1627     ide_set_signature(s);
1628 
1629     if (s->drive_kind == IDE_CD) {
1630         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1631                         * devices to return a clear status register
1632                         * with READY_STAT *not* set. */
1633         s->error = 0x01;
1634     } else {
1635         s->status = READY_STAT | SEEK_STAT;
1636         /* The bits of the error register are not as usual for this command!
1637          * They are part of the regular output (this is why ERR_STAT isn't set).
1638          * Device 0 passed, Device 1 passed or not present. */
1639         s->error = 0x01;
1640         ide_set_irq(s->bus);
1641     }
1642 
1643     return false;
1644 }
1645 
1646 static bool cmd_packet(IDEState *s, uint8_t cmd)
1647 {
1648     /* overlapping commands not supported */
1649     if (s->feature & 0x02) {
1650         ide_abort_command(s);
1651         return true;
1652     }
1653 
1654     s->status = READY_STAT | SEEK_STAT;
1655     s->atapi_dma = s->feature & 1;
1656     if (s->atapi_dma) {
1657         s->dma_cmd = IDE_DMA_ATAPI;
1658     }
1659     s->nsector = 1;
1660     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1661                        ide_atapi_cmd);
1662     return false;
1663 }
1664 
1665 
1666 /*** CF-ATA commands ***/
1667 
1668 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1669 {
1670     s->error = 0x09;    /* miscellaneous error */
1671     s->status = READY_STAT | SEEK_STAT;
1672     ide_set_irq(s->bus);
1673 
1674     return false;
1675 }
1676 
1677 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1678 {
1679     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1680      * required for Windows 8 to work with AHCI */
1681 
1682     if (cmd == CFA_WEAR_LEVEL) {
1683         s->nsector = 0;
1684     }
1685 
1686     if (cmd == CFA_ERASE_SECTORS) {
1687         s->media_changed = 1;
1688     }
1689 
1690     return true;
1691 }
1692 
1693 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1694 {
1695     s->status = READY_STAT | SEEK_STAT;
1696 
1697     memset(s->io_buffer, 0, 0x200);
1698     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1699     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1700     s->io_buffer[0x02] = s->select;                 /* Head */
1701     s->io_buffer[0x03] = s->sector;                 /* Sector */
1702     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1703     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1704     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1705     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1706     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1707     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1708     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1709 
1710     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1711     ide_set_irq(s->bus);
1712 
1713     return false;
1714 }
1715 
1716 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1717 {
1718     switch (s->feature) {
1719     case 0x02:  /* Inquiry Metadata Storage */
1720         ide_cfata_metadata_inquiry(s);
1721         break;
1722     case 0x03:  /* Read Metadata Storage */
1723         ide_cfata_metadata_read(s);
1724         break;
1725     case 0x04:  /* Write Metadata Storage */
1726         ide_cfata_metadata_write(s);
1727         break;
1728     default:
1729         ide_abort_command(s);
1730         return true;
1731     }
1732 
1733     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1734     s->status = 0x00; /* NOTE: READY is _not_ set */
1735     ide_set_irq(s->bus);
1736 
1737     return false;
1738 }
1739 
1740 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1741 {
1742     switch (s->feature) {
1743     case 0x01:  /* sense temperature in device */
1744         s->nsector = 0x50;      /* +20 C */
1745         break;
1746     default:
1747         ide_abort_command(s);
1748         return true;
1749     }
1750 
1751     return true;
1752 }
1753 
1754 
1755 /*** SMART commands ***/
1756 
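/* All SMART subcommands require the "key" values 0x4f/0xc2 in the lcyl/hcyl
 * (LBA mid/high) registers; the feature register selects the subcommand and,
 * for log access, the sector number register selects the log page. */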
1757 static bool cmd_smart(IDEState *s, uint8_t cmd)
1758 {
1759     int n;
1760 
1761     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1762         goto abort_cmd;
1763     }
1764 
1765     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1766         goto abort_cmd;
1767     }
1768 
1769     switch (s->feature) {
1770     case SMART_DISABLE:
1771         s->smart_enabled = 0;
1772         return true;
1773 
1774     case SMART_ENABLE:
1775         s->smart_enabled = 1;
1776         return true;
1777 
1778     case SMART_ATTR_AUTOSAVE:
1779         switch (s->sector) {
1780         case 0x00:
1781             s->smart_autosave = 0;
1782             break;
1783         case 0xf1:
1784             s->smart_autosave = 1;
1785             break;
1786         default:
1787             goto abort_cmd;
1788         }
1789         return true;
1790 
1791     case SMART_STATUS:
1792         if (!s->smart_errors) {
1793             s->hcyl = 0xc2;
1794             s->lcyl = 0x4f;
1795         } else {
1796             s->hcyl = 0x2c;
1797             s->lcyl = 0xf4;
1798         }
1799         return true;
1800 
1801     case SMART_READ_THRESH:
1802         memset(s->io_buffer, 0, 0x200);
1803         s->io_buffer[0] = 0x01; /* smart struct version */
1804 
1805         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1806             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1807             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1808         }
1809 
1810         /* checksum: byte 511 makes the 512-byte structure sum to 0 mod 256 */
1811         for (n = 0; n < 511; n++) {
1812             s->io_buffer[511] += s->io_buffer[n];
1813         }
1814         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1815 
1816         s->status = READY_STAT | SEEK_STAT;
1817         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1818         ide_set_irq(s->bus);
1819         return false;
1820 
1821     case SMART_READ_DATA:
1822         memset(s->io_buffer, 0, 0x200);
1823         s->io_buffer[0] = 0x01; /* smart struct version */
1824 
1825         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1826             int i;
1827             for (i = 0; i < 11; i++) {
1828                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1829             }
1830         }
1831 
1832         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1833         if (s->smart_selftest_count == 0) {
1834             s->io_buffer[363] = 0;
1835         } else {
1836             s->io_buffer[363] =
1837                 s->smart_selftest_data[3 +
1838                            (s->smart_selftest_count - 1) *
1839                            24];
1840         }
1841         s->io_buffer[364] = 0x20;
1842         s->io_buffer[365] = 0x01;
1843         /* offline data collection capability: execute + self-test */
1844         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1845         s->io_buffer[368] = 0x03; /* smart capability (1) */
1846         s->io_buffer[369] = 0x00; /* smart capability (2) */
1847         s->io_buffer[370] = 0x01; /* error logging supported */
1848         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1849         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1850         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1851 
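        /* checksum (computed as for SMART_READ_THRESH above) */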
1852         for (n = 0; n < 511; n++) {
1853             s->io_buffer[511] += s->io_buffer[n];
1854         }
1855         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1856 
1857         s->status = READY_STAT | SEEK_STAT;
1858         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1859         ide_set_irq(s->bus);
1860         return false;
1861 
1862     case SMART_READ_LOG:
1863         switch (s->sector) {
1864         case 0x01: /* summary smart error log */
1865             memset(s->io_buffer, 0, 0x200);
1866             s->io_buffer[0] = 0x01;
1867             s->io_buffer[1] = 0x00; /* no error entries */
1868             s->io_buffer[452] = s->smart_errors & 0xff;
1869             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1870 
1871             for (n = 0; n < 511; n++) {
1872                 s->io_buffer[511] += s->io_buffer[n];
1873             }
1874             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1875             break;
1876         case 0x06: /* smart self test log */
1877             memset(s->io_buffer, 0, 0x200);
1878             s->io_buffer[0] = 0x01;
1879             if (s->smart_selftest_count == 0) {
1880                 s->io_buffer[508] = 0;
1881             } else {
1882                 s->io_buffer[508] = s->smart_selftest_count;
1883                 for (n = 2; n < 506; n++)  {
1884                     s->io_buffer[n] = s->smart_selftest_data[n];
1885                 }
1886             }
1887 
1888             for (n = 0; n < 511; n++) {
1889                 s->io_buffer[511] += s->io_buffer[n];
1890             }
1891             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1892             break;
1893         default:
1894             goto abort_cmd;
1895         }
1896         s->status = READY_STAT | SEEK_STAT;
1897         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1898         ide_set_irq(s->bus);
1899         return false;
1900 
1901     case SMART_EXECUTE_OFFLINE:
1902         switch (s->sector) {
1903         case 0: /* off-line routine */
1904         case 1: /* short self test */
1905         case 2: /* extended self test */
1906             s->smart_selftest_count++;
1907             if (s->smart_selftest_count > 21) {
1908                 s->smart_selftest_count = 1;
1909             }
1910             n = 2 + (s->smart_selftest_count - 1) * 24;
1911             s->smart_selftest_data[n] = s->sector;
1912             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1913             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1914             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1915             break;
1916         default:
1917             goto abort_cmd;
1918         }
1919         return true;
1920     }
1921 
1922 abort_cmd:
1923     ide_abort_command(s);
1924     return true;
1925 }
1926 
1927 #define HD_OK (1u << IDE_HD)
1928 #define CD_OK (1u << IDE_CD)
1929 #define CFA_OK (1u << IDE_CFATA)
1930 #define HD_CFA_OK (HD_OK | CFA_OK)
1931 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1932 
1933 /* Set the Disk Seek Completed status bit during completion */
1934 #define SET_DSC (1u << 8)
1935 
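/* The command table below is indexed by the 8-bit opcode.  Opcodes without an
 * entry are zero-initialized, so their flags are 0 and ide_cmd_permitted()
 * rejects them for every drive kind. */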
1936 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1937 static const struct {
1938     /* Returns true if the completion code should be run */
1939     bool (*handler)(IDEState *s, uint8_t cmd);
1940     int flags;
1941 } ide_cmd_table[0x100] = {
1942     /* NOP not implemented, mandatory for CD */
1943     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1944     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1945     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1946     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1947     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1948     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1949     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1950     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1951     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1952     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1953     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1954     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1955     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1956     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1957     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1958     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
1959     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
1960     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
1961     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
1962     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
1963     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
1964     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
1965     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
1966     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
1967     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
1968     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
1969     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
1970     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
1971     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1972     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
1973     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
1974     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
1975     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
1976     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1977     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1978     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
1979     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
1980     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1981     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
1982     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
1983     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
1984     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
1985     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
1986     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
1987     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
1988     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
1989     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
1990     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1991     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
1992     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
1993     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
1994     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
1995     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
1996     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1997     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1998     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1999 };
2000 
2001 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2002 {
2003     return cmd < ARRAY_SIZE(ide_cmd_table)
2004         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2005 }
2006 
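/* Dispatch a command written to the command register.  A handler returning
 * true means the command completed synchronously: BSY is cleared, DSC is set
 * when requested and no error occurred, and an IRQ is raised here.  A handler
 * returning false leaves completion to a later PIO/DMA callback. */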
2007 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2008 {
2009     IDEState *s;
2010     bool complete;
2011 
2012 #if defined(DEBUG_IDE)
2013     printf("ide: CMD=%02x\n", val);
2014 #endif
2015     s = idebus_active_if(bus);
2016     /* ignore commands to a non-existent slave */
2017     if (s != bus->ifs && !s->blk) {
2018         return;
2019     }
2020 
2021     /* Only DEVICE RESET is allowed while BSY and/or DRQ are set,
2022      * and only for ATAPI devices. */
2023     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2024         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2025             return;
2026         }
2027     }
2028 
2029     if (!ide_cmd_permitted(s, val)) {
2030         ide_abort_command(s);
2031         ide_set_irq(s->bus);
2032         return;
2033     }
2034 
2035     s->status = READY_STAT | BUSY_STAT;
2036     s->error = 0;
2037     s->io_buffer_offset = 0;
2038 
2039     complete = ide_cmd_table[val].handler(s, val);
2040     if (complete) {
2041         s->status &= ~BUSY_STAT;
2042         assert(!!s->error == !!(s->status & ERR_STAT));
2043 
2044         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2045             s->status |= SEEK_STAT;
2046         }
2047 
2048         ide_cmd_done(s);
2049         ide_set_irq(s->bus);
2050     }
2051 }
2052 
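/* Task file register reads.  Offset 0 (the data port) is handled by
 * ide_data_readw()/ide_data_readl() and simply returns 0xff here; offsets 1..7
 * return error, sector count, sector number, cylinder low, cylinder high,
 * drive/head select and status.  Reading the status register also deasserts
 * the IRQ line. */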
2053 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
2054 {
2055     IDEBus *bus = opaque;
2056     IDEState *s = idebus_active_if(bus);
2057     uint32_t addr;
2058     int ret, hob;
2059 
2060     addr = addr1 & 7;
2061     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2062     //hob = s->select & (1 << 7);
2063     hob = 0;
2064     switch(addr) {
2065     case 0:
2066         ret = 0xff;
2067         break;
2068     case 1:
2069         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2070             (s != bus->ifs && !s->blk)) {
2071             ret = 0;
2072         } else if (!hob) {
2073             ret = s->error;
2074         } else {
2075             ret = s->hob_feature;
2076         }
2077         break;
2078     case 2:
2079         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2080             ret = 0;
2081         } else if (!hob) {
2082             ret = s->nsector & 0xff;
2083         } else {
2084             ret = s->hob_nsector;
2085         }
2086         break;
2087     case 3:
2088         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2089             ret = 0;
2090         } else if (!hob) {
2091             ret = s->sector;
2092         } else {
2093             ret = s->hob_sector;
2094         }
2095         break;
2096     case 4:
2097         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2098             ret = 0;
2099         } else if (!hob) {
2100             ret = s->lcyl;
2101         } else {
2102             ret = s->hob_lcyl;
2103         }
2104         break;
2105     case 5:
2106         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2107             ret = 0;
2108         } else if (!hob) {
2109             ret = s->hcyl;
2110         } else {
2111             ret = s->hob_hcyl;
2112         }
2113         break;
2114     case 6:
2115         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2116             ret = 0;
2117         } else {
2118             ret = s->select;
2119         }
2120         break;
2121     default:
2122     case 7:
2123         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2124             (s != bus->ifs && !s->blk)) {
2125             ret = 0;
2126         } else {
2127             ret = s->status;
2128         }
2129         qemu_irq_lower(bus->irq);
2130         break;
2131     }
2132 #ifdef DEBUG_IDE
2133     printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
2134 #endif
2135     return ret;
2136 }
2137 
2138 uint32_t ide_status_read(void *opaque, uint32_t addr)
2139 {
2140     IDEBus *bus = opaque;
2141     IDEState *s = idebus_active_if(bus);
2142     int ret;
2143 
2144     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2145         (s != bus->ifs && !s->blk)) {
2146         ret = 0;
2147     } else {
2148         ret = s->status;
2149     }
2150 #ifdef DEBUG_IDE
2151     printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2152 #endif
2153     return ret;
2154 }
2155 
2156 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2157 {
2158     IDEBus *bus = opaque;
2159     IDEState *s;
2160     int i;
2161 
2162 #ifdef DEBUG_IDE
2163     printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2164 #endif
2165     /* common for both drives */
2166     if (!(bus->cmd & IDE_CMD_RESET) &&
2167         (val & IDE_CMD_RESET)) {
2168         /* reset low to high */
2169         for(i = 0; i < 2; i++) {
2170             s = &bus->ifs[i];
2171             s->status = BUSY_STAT | SEEK_STAT;
2172             s->error = 0x01;
2173         }
2174     } else if ((bus->cmd & IDE_CMD_RESET) &&
2175                !(val & IDE_CMD_RESET)) {
2176         /* high to low */
2177         for(i = 0; i < 2; i++) {
2178             s = &bus->ifs[i];
2179             if (s->drive_kind == IDE_CD)
2180                 s->status = 0x00; /* NOTE: READY is _not_ set */
2181             else
2182                 s->status = READY_STAT | SEEK_STAT;
2183             ide_set_signature(s);
2184         }
2185     }
2186 
2187     bus->cmd = val;
2188 }
2189 
2190 /*
2191  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2192  * transferred from the device to the guest), false if it's a PIO in
2193  */
2194 static bool ide_is_pio_out(IDEState *s)
2195 {
2196     if (s->end_transfer_func == ide_sector_write ||
2197         s->end_transfer_func == ide_atapi_cmd) {
2198         return false;
2199     } else if (s->end_transfer_func == ide_sector_read ||
2200                s->end_transfer_func == ide_transfer_stop ||
2201                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2202                s->end_transfer_func == ide_dummy_transfer_stop) {
2203         return true;
2204     }
2205 
2206     abort();
2207 }
2208 
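/* PIO data port accessors: the guest moves the payload two or four bytes at a
 * time through io_buffer.  When data_ptr reaches data_end, DRQ is cleared and
 * the registered end_transfer_func advances the transfer state machine. */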
2209 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2210 {
2211     IDEBus *bus = opaque;
2212     IDEState *s = idebus_active_if(bus);
2213     uint8_t *p;
2214 
2215     /* PIO data access allowed only when DRQ bit is set. The result of a write
2216      * during PIO out is indeterminate, just ignore it. */
2217     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2218         return;
2219     }
2220 
2221     p = s->data_ptr;
2222     if (p + 2 > s->data_end) {
2223         return;
2224     }
2225 
2226     *(uint16_t *)p = le16_to_cpu(val);
2227     p += 2;
2228     s->data_ptr = p;
2229     if (p >= s->data_end) {
2230         s->status &= ~DRQ_STAT;
2231         s->end_transfer_func(s);
2232     }
2233 }
2234 
2235 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2236 {
2237     IDEBus *bus = opaque;
2238     IDEState *s = idebus_active_if(bus);
2239     uint8_t *p;
2240     int ret;
2241 
2242     /* PIO data access allowed only when DRQ bit is set. The result of a read
2243      * during PIO in is indeterminate, return 0 and don't move forward. */
2244     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2245         return 0;
2246     }
2247 
2248     p = s->data_ptr;
2249     if (p + 2 > s->data_end) {
2250         return 0;
2251     }
2252 
2253     ret = cpu_to_le16(*(uint16_t *)p);
2254     p += 2;
2255     s->data_ptr = p;
2256     if (p >= s->data_end) {
2257         s->status &= ~DRQ_STAT;
2258         s->end_transfer_func(s);
2259     }
2260     return ret;
2261 }
2262 
2263 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2264 {
2265     IDEBus *bus = opaque;
2266     IDEState *s = idebus_active_if(bus);
2267     uint8_t *p;
2268 
2269     /* PIO data access allowed only when DRQ bit is set. The result of a write
2270      * during PIO out is indeterminate, just ignore it. */
2271     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2272         return;
2273     }
2274 
2275     p = s->data_ptr;
2276     if (p + 4 > s->data_end) {
2277         return;
2278     }
2279 
2280     *(uint32_t *)p = le32_to_cpu(val);
2281     p += 4;
2282     s->data_ptr = p;
2283     if (p >= s->data_end) {
2284         s->status &= ~DRQ_STAT;
2285         s->end_transfer_func(s);
2286     }
2287 }
2288 
2289 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2290 {
2291     IDEBus *bus = opaque;
2292     IDEState *s = idebus_active_if(bus);
2293     uint8_t *p;
2294     int ret;
2295 
2296     /* PIO data access allowed only when DRQ bit is set. The result of a read
2297      * during PIO in is indeterminate, return 0 and don't move forward. */
2298     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2299         return 0;
2300     }
2301 
2302     p = s->data_ptr;
2303     if (p + 4 > s->data_end) {
2304         return 0;
2305     }
2306 
2307     ret = cpu_to_le32(*(uint32_t *)p);
2308     p += 4;
2309     s->data_ptr = p;
2310     if (p >= s->data_end) {
2311         s->status &= ~DRQ_STAT;
2312         s->end_transfer_func(s);
2313     }
2314     return ret;
2315 }
2316 
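/* Used as the end_transfer_func when no real transfer is pending (e.g. after a
 * reset): it parks data_ptr/data_end on io_buffer and fills the first bytes
 * with 0xff. */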
2317 static void ide_dummy_transfer_stop(IDEState *s)
2318 {
2319     s->data_ptr = s->io_buffer;
2320     s->data_end = s->io_buffer;
2321     s->io_buffer[0] = 0xff;
2322     s->io_buffer[1] = 0xff;
2323     s->io_buffer[2] = 0xff;
2324     s->io_buffer[3] = 0xff;
2325 }
2326 
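/* Reset both drives on the bus, cancel any in-flight DMA request and reset the
 * DMA provider itself. */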
2327 void ide_bus_reset(IDEBus *bus)
2328 {
2329     bus->unit = 0;
2330     bus->cmd = 0;
2331     ide_reset(&bus->ifs[0]);
2332     ide_reset(&bus->ifs[1]);
2333     ide_clear_hob(bus);
2334 
2335     /* pending async DMA */
2336     if (bus->dma->aiocb) {
2337 #ifdef DEBUG_AIO
2338         printf("aio_cancel\n");
2339 #endif
2340         blk_aio_cancel(bus->dma->aiocb);
2341         bus->dma->aiocb = NULL;
2342     }
2343 
2344     /* reset dma provider too */
2345     if (bus->dma->ops->reset) {
2346         bus->dma->ops->reset(bus->dma);
2347     }
2348 }
2349 
2350 static bool ide_cd_is_tray_open(void *opaque)
2351 {
2352     return ((IDEState *)opaque)->tray_open;
2353 }
2354 
2355 static bool ide_cd_is_medium_locked(void *opaque)
2356 {
2357     return ((IDEState *)opaque)->tray_locked;
2358 }
2359 
2360 static void ide_resize_cb(void *opaque)
2361 {
2362     IDEState *s = opaque;
2363     uint64_t nb_sectors;
2364 
2365     if (!s->identify_set) {
2366         return;
2367     }
2368 
2369     blk_get_geometry(s->blk, &nb_sectors);
2370     s->nb_sectors = nb_sectors;
2371 
2372     /* Update the identify data buffer. */
2373     if (s->drive_kind == IDE_CFATA) {
2374         ide_cfata_identify_size(s);
2375     } else {
2376         /* IDE_CD uses a different set of callbacks entirely. */
2377         assert(s->drive_kind != IDE_CD);
2378         ide_identify_size(s);
2379     }
2380 }
2381 
2382 static const BlockDevOps ide_cd_block_ops = {
2383     .change_media_cb = ide_cd_change_cb,
2384     .eject_request_cb = ide_cd_eject_request_cb,
2385     .is_tray_open = ide_cd_is_tray_open,
2386     .is_medium_locked = ide_cd_is_medium_locked,
2387 };
2388 
2389 static const BlockDevOps ide_hd_block_ops = {
2390     .resize_cb = ide_resize_cb,
2391 };
2392 
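/* Configure one drive: record geometry, serial/model/version strings and WWN,
 * install the CD or hard disk BlockDevOps, and reject empty or read-only
 * backends for disk/CFATA drives.  Returns 0 on success, -1 on error. */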
2393 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2394                    const char *version, const char *serial, const char *model,
2395                    uint64_t wwn,
2396                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2397                    int chs_trans)
2398 {
2399     uint64_t nb_sectors;
2400 
2401     s->blk = blk;
2402     s->drive_kind = kind;
2403 
2404     blk_get_geometry(blk, &nb_sectors);
2405     s->cylinders = cylinders;
2406     s->heads = heads;
2407     s->sectors = secs;
2408     s->chs_trans = chs_trans;
2409     s->nb_sectors = nb_sectors;
2410     s->wwn = wwn;
2411     /* The SMART values should be preserved across power cycles
2412        but they aren't.  */
2413     s->smart_enabled = 1;
2414     s->smart_autosave = 1;
2415     s->smart_errors = 0;
2416     s->smart_selftest_count = 0;
2417     if (kind == IDE_CD) {
2418         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2419         blk_set_guest_block_size(blk, 2048);
2420     } else {
2421         if (!blk_is_inserted(s->blk)) {
2422             error_report("Device needs media, but drive is empty");
2423             return -1;
2424         }
2425         if (blk_is_read_only(blk)) {
2426             error_report("Can't use a read-only drive");
2427             return -1;
2428         }
2429         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2430     }
2431     if (serial) {
2432         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2433     } else {
2434         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2435                  "QM%05d", s->drive_serial);
2436     }
2437     if (model) {
2438         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2439     } else {
2440         switch (kind) {
2441         case IDE_CD:
2442             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2443             break;
2444         case IDE_CFATA:
2445             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2446             break;
2447         default:
2448             strcpy(s->drive_model_str, "QEMU HARDDISK");
2449             break;
2450         }
2451     }
2452 
2453     if (version) {
2454         pstrcpy(s->version, sizeof(s->version), version);
2455     } else {
2456         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2457     }
2458 
2459     ide_reset(s);
2460     blk_iostatus_enable(blk);
2461     return 0;
2462 }
2463 
2464 static void ide_init1(IDEBus *bus, int unit)
2465 {
2466     static int drive_serial = 1;
2467     IDEState *s = &bus->ifs[unit];
2468 
2469     s->bus = bus;
2470     s->unit = unit;
2471     s->drive_serial = drive_serial++;
2472     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2473     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2474     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2475     memset(s->io_buffer, 0, s->io_buffer_total_len);
2476 
2477     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2478     memset(s->smart_selftest_data, 0, 512);
2479 
2480     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2481                                            ide_sector_write_timer_cb, s);
2482 }
2483 
2484 static int ide_nop_int(IDEDMA *dma, int x)
2485 {
2486     return 0;
2487 }
2488 
2489 static void ide_nop(IDEDMA *dma)
2490 {
2491 }
2492 
2493 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2494 {
2495     return 0;
2496 }
2497 
2498 static const IDEDMAOps ide_dma_nop_ops = {
2499     .prepare_buf    = ide_nop_int32,
2500     .restart_dma    = ide_nop,
2501     .rw_buf         = ide_nop_int,
2502 };
2503 
2504 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2505 {
2506     s->unit = s->bus->retry_unit;
2507     ide_set_sector(s, s->bus->retry_sector_num);
2508     s->nsector = s->bus->retry_nsector;
2509     s->bus->dma->ops->restart_dma(s->bus->dma);
2510     s->io_buffer_size = 0;
2511     s->dma_cmd = dma_cmd;
2512     ide_start_dma(s, ide_dma_cb);
2513 }
2514 
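/* Bottom half scheduled when the VM resumes with a request recorded in
 * bus->error_status (e.g. after a werror/rerror=stop pause): depending on the
 * retry flags it kicks the HBA or re-issues the pending DMA, PIO, flush or
 * ATAPI operation. */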
2515 static void ide_restart_bh(void *opaque)
2516 {
2517     IDEBus *bus = opaque;
2518     IDEState *s;
2519     bool is_read;
2520     int error_status;
2521 
2522     qemu_bh_delete(bus->bh);
2523     bus->bh = NULL;
2524 
2525     error_status = bus->error_status;
2526     if (bus->error_status == 0) {
2527         return;
2528     }
2529 
2530     s = idebus_active_if(bus);
2531     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2532 
2533     /* The error status must be cleared before resubmitting the request: The
2534      * request may fail again, and this case can only be distinguished if the
2535      * called function can set a new error status. */
2536     bus->error_status = 0;
2537 
2538     /* The HBA has generically asked to be kicked on retry */
2539     if (error_status & IDE_RETRY_HBA) {
2540         if (s->bus->dma->ops->restart) {
2541             s->bus->dma->ops->restart(s->bus->dma);
2542         }
2543     } else if (IS_IDE_RETRY_DMA(error_status)) {
2544         if (error_status & IDE_RETRY_TRIM) {
2545             ide_restart_dma(s, IDE_DMA_TRIM);
2546         } else {
2547             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2548         }
2549     } else if (IS_IDE_RETRY_PIO(error_status)) {
2550         if (is_read) {
2551             ide_sector_read(s);
2552         } else {
2553             ide_sector_write(s);
2554         }
2555     } else if (error_status & IDE_RETRY_FLUSH) {
2556         ide_flush_cache(s);
2557     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2558         assert(s->end_transfer_func == ide_atapi_cmd);
2559         ide_atapi_dma_restart(s);
2560     } else {
2561         abort();
2562     }
2563 }
2564 
2565 static void ide_restart_cb(void *opaque, int running, RunState state)
2566 {
2567     IDEBus *bus = opaque;
2568 
2569     if (!running)
2570         return;
2571 
2572     if (!bus->bh) {
2573         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2574         qemu_bh_schedule(bus->bh);
2575     }
2576 }
2577 
2578 void ide_register_restart_cb(IDEBus *bus)
2579 {
2580     if (bus->dma->ops->restart_dma) {
2581         qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2582     }
2583 }
2584 
2585 static IDEDMA ide_dma_nop = {
2586     .ops = &ide_dma_nop_ops,
2587     .aiocb = NULL,
2588 };
2589 
2590 void ide_init2(IDEBus *bus, qemu_irq irq)
2591 {
2592     int i;
2593 
2594     for(i = 0; i < 2; i++) {
2595         ide_init1(bus, i);
2596         ide_reset(&bus->ifs[i]);
2597     }
2598     bus->irq = irq;
2599     bus->dma = &ide_dma_nop;
2600 }
2601 
2602 static const MemoryRegionPortio ide_portio_list[] = {
2603     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2604     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2605     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2606     PORTIO_END_OF_LIST(),
2607 };
2608 
2609 static const MemoryRegionPortio ide_portio2_list[] = {
2610     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2611     PORTIO_END_OF_LIST(),
2612 };
2613 
2614 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2615 {
2616     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2617        bridge has been set up properly to always register with ISA.  */
2618     isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2619 
2620     if (iobase2) {
2621         isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2622     }
2623 }
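
/*
 * Rough usage sketch (illustrative only, not taken from this file; bus and
 * isadev are placeholders): an ISA adapter for the legacy primary channel
 * might wire these helpers up roughly as follows:
 *
 *     ide_init2(bus, isa_get_irq(isadev, 14));
 *     ide_init_ioport(bus, isadev, 0x1f0, 0x3f6);
 *     ide_register_restart_cb(bus);
 */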
2624 
2625 static bool is_identify_set(void *opaque, int version_id)
2626 {
2627     IDEState *s = opaque;
2628 
2629     return s->identify_set != 0;
2630 }
2631 
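/* end_transfer_func is a function pointer and cannot be migrated directly, so
 * ide_drive_pio_pre_save()/ide_drive_pio_post_load() translate it to and from
 * an index into this table. */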
2632 static EndTransferFunc* transfer_end_table[] = {
2633         ide_sector_read,
2634         ide_sector_write,
2635         ide_transfer_stop,
2636         ide_atapi_cmd_reply_end,
2637         ide_atapi_cmd,
2638         ide_dummy_transfer_stop,
2639 };
2640 
2641 static int transfer_end_table_idx(EndTransferFunc *fn)
2642 {
2643     int i;
2644 
2645     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2646         if (transfer_end_table[i] == fn)
2647             return i;
2648 
2649     return -1;
2650 }
2651 
2652 static int ide_drive_post_load(void *opaque, int version_id)
2653 {
2654     IDEState *s = opaque;
2655 
2656     if (s->blk && s->identify_set) {
2657         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2658     }
2659     return 0;
2660 }
2661 
2662 static int ide_drive_pio_post_load(void *opaque, int version_id)
2663 {
2664     IDEState *s = opaque;
2665 
2666     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2667         return -EINVAL;
2668     }
2669     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2670     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2671     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2672     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2673 
2674     return 0;
2675 }
2676 
2677 static void ide_drive_pio_pre_save(void *opaque)
2678 {
2679     IDEState *s = opaque;
2680     int idx;
2681 
2682     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2683     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2684 
2685     idx = transfer_end_table_idx(s->end_transfer_func);
2686     if (idx == -1) {
2687         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2688                         __func__);
2689         s->end_transfer_fn_idx = 2;
2690     } else {
2691         s->end_transfer_fn_idx = idx;
2692     }
2693 }
2694 
2695 static bool ide_drive_pio_state_needed(void *opaque)
2696 {
2697     IDEState *s = opaque;
2698 
2699     return ((s->status & DRQ_STAT) != 0)
2700         || (s->bus->error_status & IDE_RETRY_PIO);
2701 }
2702 
2703 static bool ide_tray_state_needed(void *opaque)
2704 {
2705     IDEState *s = opaque;
2706 
2707     return s->tray_open || s->tray_locked;
2708 }
2709 
2710 static bool ide_atapi_gesn_needed(void *opaque)
2711 {
2712     IDEState *s = opaque;
2713 
2714     return s->events.new_media || s->events.eject_request;
2715 }
2716 
2717 static bool ide_error_needed(void *opaque)
2718 {
2719     IDEBus *bus = opaque;
2720 
2721     return (bus->error_status != 0);
2722 }
2723 
2724 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2725 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2726     .name = "ide_drive/atapi/gesn_state",
2727     .version_id = 1,
2728     .minimum_version_id = 1,
2729     .needed = ide_atapi_gesn_needed,
2730     .fields = (VMStateField[]) {
2731         VMSTATE_BOOL(events.new_media, IDEState),
2732         VMSTATE_BOOL(events.eject_request, IDEState),
2733         VMSTATE_END_OF_LIST()
2734     }
2735 };
2736 
2737 static const VMStateDescription vmstate_ide_tray_state = {
2738     .name = "ide_drive/tray_state",
2739     .version_id = 1,
2740     .minimum_version_id = 1,
2741     .needed = ide_tray_state_needed,
2742     .fields = (VMStateField[]) {
2743         VMSTATE_BOOL(tray_open, IDEState),
2744         VMSTATE_BOOL(tray_locked, IDEState),
2745         VMSTATE_END_OF_LIST()
2746     }
2747 };
2748 
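/* Only migrated while a PIO transfer is in flight or needs to be retried
 * (see ide_drive_pio_state_needed). */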
2749 static const VMStateDescription vmstate_ide_drive_pio_state = {
2750     .name = "ide_drive/pio_state",
2751     .version_id = 1,
2752     .minimum_version_id = 1,
2753     .pre_save = ide_drive_pio_pre_save,
2754     .post_load = ide_drive_pio_post_load,
2755     .needed = ide_drive_pio_state_needed,
2756     .fields = (VMStateField[]) {
2757         VMSTATE_INT32(req_nb_sectors, IDEState),
2758         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2759                              vmstate_info_uint8, uint8_t),
2760         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2761         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2762         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2763         VMSTATE_INT32(elementary_transfer_size, IDEState),
2764         VMSTATE_INT32(packet_transfer_size, IDEState),
2765         VMSTATE_END_OF_LIST()
2766     }
2767 };
2768 
2769 const VMStateDescription vmstate_ide_drive = {
2770     .name = "ide_drive",
2771     .version_id = 3,
2772     .minimum_version_id = 0,
2773     .post_load = ide_drive_post_load,
2774     .fields = (VMStateField[]) {
2775         VMSTATE_INT32(mult_sectors, IDEState),
2776         VMSTATE_INT32(identify_set, IDEState),
2777         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2778         VMSTATE_UINT8(feature, IDEState),
2779         VMSTATE_UINT8(error, IDEState),
2780         VMSTATE_UINT32(nsector, IDEState),
2781         VMSTATE_UINT8(sector, IDEState),
2782         VMSTATE_UINT8(lcyl, IDEState),
2783         VMSTATE_UINT8(hcyl, IDEState),
2784         VMSTATE_UINT8(hob_feature, IDEState),
2785         VMSTATE_UINT8(hob_sector, IDEState),
2786         VMSTATE_UINT8(hob_nsector, IDEState),
2787         VMSTATE_UINT8(hob_lcyl, IDEState),
2788         VMSTATE_UINT8(hob_hcyl, IDEState),
2789         VMSTATE_UINT8(select, IDEState),
2790         VMSTATE_UINT8(status, IDEState),
2791         VMSTATE_UINT8(lba48, IDEState),
2792         VMSTATE_UINT8(sense_key, IDEState),
2793         VMSTATE_UINT8(asc, IDEState),
2794         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2795         VMSTATE_END_OF_LIST()
2796     },
2797     .subsections = (const VMStateDescription*[]) {
2798         &vmstate_ide_drive_pio_state,
2799         &vmstate_ide_tray_state,
2800         &vmstate_ide_atapi_gesn_state,
2801         NULL
2802     }
2803 };
2804 
2805 static const VMStateDescription vmstate_ide_error_status = {
2806     .name = "ide_bus/error",
2807     .version_id = 2,
2808     .minimum_version_id = 1,
2809     .needed = ide_error_needed,
2810     .fields = (VMStateField[]) {
2811         VMSTATE_INT32(error_status, IDEBus),
2812         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2813         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2814         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2815         VMSTATE_END_OF_LIST()
2816     }
2817 };
2818 
2819 const VMStateDescription vmstate_ide_bus = {
2820     .name = "ide_bus",
2821     .version_id = 1,
2822     .minimum_version_id = 1,
2823     .fields = (VMStateField[]) {
2824         VMSTATE_UINT8(cmd, IDEBus),
2825         VMSTATE_UINT8(unit, IDEBus),
2826         VMSTATE_END_OF_LIST()
2827     },
2828     .subsections = (const VMStateDescription*[]) {
2829         &vmstate_ide_error_status,
2830         NULL
2831     }
2832 };
2833 
2834 void ide_drive_get(DriveInfo **hd, int n)
2835 {
2836     int i;
2837     int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2838     int max_devs = drive_get_max_devs(IF_IDE);
2839     int n_buses = max_devs ? (n / max_devs) : n;
2840 
2841     /*
2842      * Note: The number of actual buses available is not known.
2843      * We compute this based on the size of the DriveInfo* array, n.
2844      * If it is less than max_devs * <num_real_buses>,
2845      * we will stop looking for drives prematurely instead of overfilling
2846      * the array.
2847      */
2848 
2849     if (highest_bus > n_buses) {
2850         error_report("Too many IDE buses defined (%d > %d)",
2851                      highest_bus, n_buses);
2852         exit(1);
2853     }
2854 
2855     for (i = 0; i < n; i++) {
2856         hd[i] = drive_get_by_index(IF_IDE, i);
2857     }
2858 }
2859