xref: /openbmc/qemu/hw/ide/core.c (revision 709395f8)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/hw.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qapi/error.h"
37 #include "qemu/cutils.h"
38 #include "sysemu/replay.h"
39 
40 #include "hw/ide/internal.h"
41 #include "trace.h"
42 
43 /* These values were based on a Seagate ST3500418AS but have been modified
44    to make more sense in QEMU */
45 static const int smart_attributes[][12] = {
46     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
47     /* raw read error rate*/
48     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
49     /* spin up */
50     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
51     /* start stop count */
52     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
53     /* remapped sectors */
54     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
55     /* power on hours */
56     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
57     /* power cycle count */
58     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
59     /* airflow-temperature-celsius */
60     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
61 };
62 
63 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
64     [IDE_DMA_READ] = "DMA READ",
65     [IDE_DMA_WRITE] = "DMA WRITE",
66     [IDE_DMA_TRIM] = "DMA TRIM",
67     [IDE_DMA_ATAPI] = "DMA ATAPI"
68 };
69 
70 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
71 {
72     if ((unsigned)enval < IDE_DMA__COUNT) {
73         return IDE_DMA_CMD_lookup[enval];
74     }
75     return "DMA UNKNOWN CMD";
76 }
77 
78 static void ide_dummy_transfer_stop(IDEState *s);
79 
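/*
 * Fill 'str' with 'len' bytes of 'src', space-padded.  The i^1 index
 * swaps each pair of bytes because ATA IDENTIFY strings are stored as
 * big-endian characters inside little-endian 16-bit words.
 */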
80 static void padstr(char *str, const char *src, int len)
81 {
82     int i, v;
83     for (i = 0; i < len; i++) {
84         if (*src)
85             v = *src++;
86         else
87             v = ' ';
88         str[i^1] = v;
89     }
90 }
91 
92 static void put_le16(uint16_t *p, unsigned int v)
93 {
94     *p = cpu_to_le16(v);
95 }
96 
97 static void ide_identify_size(IDEState *s)
98 {
99     uint16_t *p = (uint16_t *)s->identify_data;
100     put_le16(p + 60, s->nb_sectors);
101     put_le16(p + 61, s->nb_sectors >> 16);
102     put_le16(p + 100, s->nb_sectors);
103     put_le16(p + 101, s->nb_sectors >> 16);
104     put_le16(p + 102, s->nb_sectors >> 32);
105     put_le16(p + 103, s->nb_sectors >> 48);
106 }
107 
108 static void ide_identify(IDEState *s)
109 {
110     uint16_t *p;
111     unsigned int oldsize;
112     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
113 
114     p = (uint16_t *)s->identify_data;
115     if (s->identify_set) {
116         goto fill_buffer;
117     }
118     memset(p, 0, sizeof(s->identify_data));
119 
120     put_le16(p + 0, 0x0040);
121     put_le16(p + 1, s->cylinders);
122     put_le16(p + 3, s->heads);
123     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
124     put_le16(p + 5, 512); /* XXX: retired, remove ? */
125     put_le16(p + 6, s->sectors);
126     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
127     put_le16(p + 20, 3); /* XXX: retired, remove ? */
128     put_le16(p + 21, 512); /* cache size in sectors */
129     put_le16(p + 22, 4); /* ecc bytes */
130     padstr((char *)(p + 23), s->version, 8); /* firmware version */
131     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
132 #if MAX_MULT_SECTORS > 1
133     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
134 #endif
135     put_le16(p + 48, 1); /* dword I/O */
136     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
137     put_le16(p + 51, 0x200); /* PIO transfer cycle */
138     put_le16(p + 52, 0x200); /* DMA transfer cycle */
139     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
140     put_le16(p + 54, s->cylinders);
141     put_le16(p + 55, s->heads);
142     put_le16(p + 56, s->sectors);
143     oldsize = s->cylinders * s->heads * s->sectors;
144     put_le16(p + 57, oldsize);
145     put_le16(p + 58, oldsize >> 16);
146     if (s->mult_sectors)
147         put_le16(p + 59, 0x100 | s->mult_sectors);
148     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
149     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
150     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
151     put_le16(p + 63, 0x07); /* mdma0-2 supported */
152     put_le16(p + 64, 0x03); /* pio3-4 supported */
153     put_le16(p + 65, 120);
154     put_le16(p + 66, 120);
155     put_le16(p + 67, 120);
156     put_le16(p + 68, 120);
157     if (dev && dev->conf.discard_granularity) {
158         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
159     }
160 
161     if (s->ncq_queues) {
162         put_le16(p + 75, s->ncq_queues - 1);
163         /* NCQ supported */
164         put_le16(p + 76, (1 << 8));
165     }
166 
167     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
168     put_le16(p + 81, 0x16); /* conforms to ata5 */
169     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
170     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
171     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
172     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
173     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
174     if (s->wwn) {
175         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
176     } else {
177         put_le16(p + 84, (1 << 14) | 0);
178     }
179     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
180     if (blk_enable_write_cache(s->blk)) {
181         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
182     } else {
183         put_le16(p + 85, (1 << 14) | 1);
184     }
185     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
186     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
187     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
188     if (s->wwn) {
189         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
190     } else {
191         put_le16(p + 87, (1 << 14) | 0);
192     }
193     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
194     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
195     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
196     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
197     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
198     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
199 
200     if (dev && dev->conf.physical_block_size)
201         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
202     if (s->wwn) {
203         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
204         put_le16(p + 108, s->wwn >> 48);
205         put_le16(p + 109, s->wwn >> 32);
206         put_le16(p + 110, s->wwn >> 16);
207         put_le16(p + 111, s->wwn);
208     }
209     if (dev && dev->conf.discard_granularity) {
210         put_le16(p + 169, 1); /* TRIM support */
211     }
212     if (dev) {
213         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
214     }
215 
216     ide_identify_size(s);
217     s->identify_set = 1;
218 
219 fill_buffer:
220     memcpy(s->io_buffer, p, sizeof(s->identify_data));
221 }
222 
223 static void ide_atapi_identify(IDEState *s)
224 {
225     uint16_t *p;
226 
227     p = (uint16_t *)s->identify_data;
228     if (s->identify_set) {
229         goto fill_buffer;
230     }
231     memset(p, 0, sizeof(s->identify_data));
232 
233     /* Removable CDROM, 50us response, 12 byte packets */
234     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
235     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
236     put_le16(p + 20, 3); /* buffer type */
237     put_le16(p + 21, 512); /* cache size in sectors */
238     put_le16(p + 22, 4); /* ecc bytes */
239     padstr((char *)(p + 23), s->version, 8); /* firmware version */
240     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
241     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
242 #ifdef USE_DMA_CDROM
243     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
244     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
245     put_le16(p + 62, 7);  /* single word dma0-2 supported */
246     put_le16(p + 63, 7);  /* mdma0-2 supported */
247 #else
248     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
249     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
250     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
251 #endif
252     put_le16(p + 64, 3); /* pio3-4 supported */
253     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
254     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
255     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
256     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
257 
258     put_le16(p + 71, 30); /* in ns */
259     put_le16(p + 72, 30); /* in ns */
260 
261     if (s->ncq_queues) {
262         put_le16(p + 75, s->ncq_queues - 1);
263         /* NCQ supported */
264         put_le16(p + 76, (1 << 8));
265     }
266 
267     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
268     if (s->wwn) {
269         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
270         put_le16(p + 87, (1 << 8)); /* WWN enabled */
271     }
272 
273 #ifdef USE_DMA_CDROM
274     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
275 #endif
276 
277     if (s->wwn) {
278         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
279         put_le16(p + 108, s->wwn >> 48);
280         put_le16(p + 109, s->wwn >> 32);
281         put_le16(p + 110, s->wwn >> 16);
282         put_le16(p + 111, s->wwn);
283     }
284 
285     s->identify_set = 1;
286 
287 fill_buffer:
288     memcpy(s->io_buffer, p, sizeof(s->identify_data));
289 }
290 
291 static void ide_cfata_identify_size(IDEState *s)
292 {
293     uint16_t *p = (uint16_t *)s->identify_data;
294     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
295     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
296     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
297     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
298 }
299 
300 static void ide_cfata_identify(IDEState *s)
301 {
302     uint16_t *p;
303     uint32_t cur_sec;
304 
305     p = (uint16_t *)s->identify_data;
306     if (s->identify_set) {
307         goto fill_buffer;
308     }
309     memset(p, 0, sizeof(s->identify_data));
310 
311     cur_sec = s->cylinders * s->heads * s->sectors;
312 
313     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
314     put_le16(p + 1, s->cylinders);		/* Default cylinders */
315     put_le16(p + 3, s->heads);			/* Default heads */
316     put_le16(p + 6, s->sectors);		/* Default sectors per track */
317     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
318     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
319     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
320     put_le16(p + 22, 0x0004);			/* ECC bytes */
321     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
322     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
323 #if MAX_MULT_SECTORS > 1
324     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
325 #else
326     put_le16(p + 47, 0x0000);
327 #endif
328     put_le16(p + 49, 0x0f00);			/* Capabilities */
329     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
330     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
331     put_le16(p + 53, 0x0003);			/* Translation params valid */
332     put_le16(p + 54, s->cylinders);		/* Current cylinders */
333     put_le16(p + 55, s->heads);			/* Current heads */
334     put_le16(p + 56, s->sectors);		/* Current sectors */
335     put_le16(p + 57, cur_sec);			/* Current capacity */
336     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
337     if (s->mult_sectors)			/* Multiple sector setting */
338         put_le16(p + 59, 0x100 | s->mult_sectors);
339     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
340     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
341     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
342     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
343     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
344     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
345     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
346     put_le16(p + 82, 0x400c);			/* Command Set supported */
347     put_le16(p + 83, 0x7068);			/* Command Set supported */
348     put_le16(p + 84, 0x4000);			/* Features supported */
349     put_le16(p + 85, 0x000c);			/* Command Set enabled */
350     put_le16(p + 86, 0x7044);			/* Command Set enabled */
351     put_le16(p + 87, 0x4000);			/* Features enabled */
352     put_le16(p + 91, 0x4060);			/* Current APM level */
353     put_le16(p + 129, 0x0002);			/* Current features option */
354     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
355     put_le16(p + 131, 0x0001);			/* Initial power mode */
356     put_le16(p + 132, 0x0000);			/* User signature */
357     put_le16(p + 160, 0x8100);			/* Power requirement */
358     put_le16(p + 161, 0x8001);			/* CF command set */
359 
360     ide_cfata_identify_size(s);
361     s->identify_set = 1;
362 
363 fill_buffer:
364     memcpy(s->io_buffer, p, sizeof(s->identify_data));
365 }
366 
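/*
 * Load the ATA device signature into the task-file registers: the
 * cylinder pair reads 0xeb14 for a PACKET (ATAPI) device, 0x0000 for
 * an ATA disk, and 0xffff when no drive is attached.
 */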
367 static void ide_set_signature(IDEState *s)
368 {
369     s->select &= 0xf0; /* clear head */
370     /* put signature */
371     s->nsector = 1;
372     s->sector = 1;
373     if (s->drive_kind == IDE_CD) {
374         s->lcyl = 0x14;
375         s->hcyl = 0xeb;
376     } else if (s->blk) {
377         s->lcyl = 0;
378         s->hcyl = 0;
379     } else {
380         s->lcyl = 0xff;
381         s->hcyl = 0xff;
382     }
383 }
384 
385 static bool ide_sect_range_ok(IDEState *s,
386                               uint64_t sector, uint64_t nb_sectors)
387 {
388     uint64_t total_sectors;
389 
390     blk_get_geometry(s->blk, &total_sectors);
391     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
392         return false;
393     }
394     return true;
395 }
396 
397 typedef struct TrimAIOCB {
398     BlockAIOCB common;
399     IDEState *s;
400     QEMUBH *bh;
401     int ret;
402     QEMUIOVector *qiov;
403     BlockAIOCB *aiocb;
404     int i, j;
405 } TrimAIOCB;
406 
407 static void trim_aio_cancel(BlockAIOCB *acb)
408 {
409     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
410 
411     /* Exit the loop so ide_issue_trim_cb will not continue  */
412     iocb->j = iocb->qiov->niov - 1;
413     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
414 
415     iocb->ret = -ECANCELED;
416 
417     if (iocb->aiocb) {
418         blk_aio_cancel_async(iocb->aiocb);
419         iocb->aiocb = NULL;
420     }
421 }
422 
423 static const AIOCBInfo trim_aiocb_info = {
424     .aiocb_size         = sizeof(TrimAIOCB),
425     .cancel_async       = trim_aio_cancel,
426 };
427 
428 static void ide_trim_bh_cb(void *opaque)
429 {
430     TrimAIOCB *iocb = opaque;
431 
432     iocb->common.cb(iocb->common.opaque, iocb->ret);
433 
434     qemu_bh_delete(iocb->bh);
435     iocb->bh = NULL;
436     qemu_aio_unref(iocb);
437 }
438 
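/*
 * Walk the guest-supplied DSM TRIM payload one 8-byte entry at a time
 * (48-bit LBA + 16-bit sector count), issuing one discard per entry.
 * iocb->i/j record the position inside the qiov so the chain resumes
 * here after each asynchronous discard completes.
 */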
439 static void ide_issue_trim_cb(void *opaque, int ret)
440 {
441     TrimAIOCB *iocb = opaque;
442     IDEState *s = iocb->s;
443 
444     if (ret >= 0) {
445         while (iocb->j < iocb->qiov->niov) {
446             int j = iocb->j;
447             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
448                 int i = iocb->i;
449                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
450 
451                 /* 6-byte LBA + 2-byte range per entry */
452                 uint64_t entry = le64_to_cpu(buffer[i]);
453                 uint64_t sector = entry & 0x0000ffffffffffffULL;
454                 uint16_t count = entry >> 48;
455 
456                 if (count == 0) {
457                     continue;
458                 }
459 
460                 if (!ide_sect_range_ok(s, sector, count)) {
461                     iocb->ret = -EINVAL;
462                     goto done;
463                 }
464 
465                 /* Got an entry! Submit and exit.  */
466                 iocb->aiocb = blk_aio_pdiscard(s->blk,
467                                                sector << BDRV_SECTOR_BITS,
468                                                count << BDRV_SECTOR_BITS,
469                                                ide_issue_trim_cb, opaque);
470                 return;
471             }
472 
473             iocb->j++;
474             iocb->i = -1;
475         }
476     } else {
477         iocb->ret = ret;
478     }
479 
480 done:
481     iocb->aiocb = NULL;
482     if (iocb->bh) {
483         replay_bh_schedule_event(iocb->bh);
484     }
485 }
486 
487 BlockAIOCB *ide_issue_trim(
488         int64_t offset, QEMUIOVector *qiov,
489         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
490 {
491     IDEState *s = opaque;
492     TrimAIOCB *iocb;
493 
494     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
495     iocb->s = s;
496     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
497     iocb->ret = 0;
498     iocb->qiov = qiov;
499     iocb->i = -1;
500     iocb->j = 0;
501     ide_issue_trim_cb(iocb, 0);
502     return &iocb->common;
503 }
504 
505 void ide_abort_command(IDEState *s)
506 {
507     ide_transfer_stop(s);
508     s->status = READY_STAT | ERR_STAT;
509     s->error = ABRT_ERR;
510 }
511 
512 static void ide_set_retry(IDEState *s)
513 {
514     s->bus->retry_unit = s->unit;
515     s->bus->retry_sector_num = ide_get_sector(s);
516     s->bus->retry_nsector = s->nsector;
517 }
518 
519 static void ide_clear_retry(IDEState *s)
520 {
521     s->bus->retry_unit = -1;
522     s->bus->retry_sector_num = 0;
523     s->bus->retry_nsector = 0;
524 }
525 
526 /* prepare data transfer and tell what to do after */
527 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
528                                   EndTransferFunc *end_transfer_func)
529 {
530     s->data_ptr = buf;
531     s->data_end = buf + size;
532     ide_set_retry(s);
533     if (!(s->status & ERR_STAT)) {
534         s->status |= DRQ_STAT;
535     }
536     if (!s->bus->dma->ops->pio_transfer) {
537         s->end_transfer_func = end_transfer_func;
538         return false;
539     }
540     s->bus->dma->ops->pio_transfer(s->bus->dma);
541     return true;
542 }
543 
544 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
545                         EndTransferFunc *end_transfer_func)
546 {
547     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
548         end_transfer_func(s);
549     }
550 }
551 
552 static void ide_cmd_done(IDEState *s)
553 {
554     if (s->bus->dma->ops->cmd_done) {
555         s->bus->dma->ops->cmd_done(s->bus->dma);
556     }
557 }
558 
559 static void ide_transfer_halt(IDEState *s)
560 {
561     s->end_transfer_func = ide_transfer_stop;
562     s->data_ptr = s->io_buffer;
563     s->data_end = s->io_buffer;
564     s->status &= ~DRQ_STAT;
565 }
566 
567 void ide_transfer_stop(IDEState *s)
568 {
569     ide_transfer_halt(s);
570     ide_cmd_done(s);
571 }
572 
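/*
 * Decode the current sector number from the task-file registers:
 * LBA48 uses the HOB copies for bits 47:24, LBA28 packs bits 27:24
 * into the low nibble of the select register, and CHS addressing
 * falls back to cylinder/head/sector arithmetic.
 */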
573 int64_t ide_get_sector(IDEState *s)
574 {
575     int64_t sector_num;
576     if (s->select & 0x40) {
577         /* lba */
578         if (!s->lba48) {
579             sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
580                 (s->lcyl << 8) | s->sector;
581         } else {
582             sector_num = ((int64_t)s->hob_hcyl << 40) |
583                 ((int64_t) s->hob_lcyl << 32) |
584                 ((int64_t) s->hob_sector << 24) |
585                 ((int64_t) s->hcyl << 16) |
586                 ((int64_t) s->lcyl << 8) | s->sector;
587         }
588     } else {
589         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
590             (s->select & 0x0f) * s->sectors + (s->sector - 1);
591     }
592     return sector_num;
593 }
594 
595 void ide_set_sector(IDEState *s, int64_t sector_num)
596 {
597     unsigned int cyl, r;
598     if (s->select & 0x40) {
599         if (!s->lba48) {
600             s->select = (s->select & 0xf0) | (sector_num >> 24);
601             s->hcyl = (sector_num >> 16);
602             s->lcyl = (sector_num >> 8);
603             s->sector = (sector_num);
604         } else {
605             s->sector = sector_num;
606             s->lcyl = sector_num >> 8;
607             s->hcyl = sector_num >> 16;
608             s->hob_sector = sector_num >> 24;
609             s->hob_lcyl = sector_num >> 32;
610             s->hob_hcyl = sector_num >> 40;
611         }
612     } else {
613         cyl = sector_num / (s->heads * s->sectors);
614         r = sector_num % (s->heads * s->sectors);
615         s->hcyl = cyl >> 8;
616         s->lcyl = cyl;
617         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
618         s->sector = (r % s->sectors) + 1;
619     }
620 }
621 
622 static void ide_rw_error(IDEState *s) {
623     ide_abort_command(s);
624     ide_set_irq(s->bus);
625 }
626 
627 static void ide_buffered_readv_cb(void *opaque, int ret)
628 {
629     IDEBufferedRequest *req = opaque;
630     if (!req->orphaned) {
631         if (!ret) {
632             assert(req->qiov.size == req->original_qiov->size);
633             qemu_iovec_from_buf(req->original_qiov, 0,
634                                 req->qiov.local_iov.iov_base,
635                                 req->original_qiov->size);
636         }
637         req->original_cb(req->original_opaque, ret);
638     }
639     QLIST_REMOVE(req, list);
640     qemu_vfree(qemu_iovec_buf(&req->qiov));
641     g_free(req);
642 }
643 
644 #define MAX_BUFFERED_REQS 16
645 
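/*
 * Reads issued through this helper go through a private bounce buffer
 * rather than straight into the caller's iovec, so that a cancelled
 * request can simply be orphaned: if the backend completes it later,
 * the data is dropped instead of landing in memory the guest may
 * already have reused.
 */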
646 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
647                                QEMUIOVector *iov, int nb_sectors,
648                                BlockCompletionFunc *cb, void *opaque)
649 {
650     BlockAIOCB *aioreq;
651     IDEBufferedRequest *req;
652     int c = 0;
653 
654     QLIST_FOREACH(req, &s->buffered_requests, list) {
655         c++;
656     }
657     if (c > MAX_BUFFERED_REQS) {
658         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
659     }
660 
661     req = g_new0(IDEBufferedRequest, 1);
662     req->original_qiov = iov;
663     req->original_cb = cb;
664     req->original_opaque = opaque;
665     qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
666                         iov->size);
667 
668     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
669                             &req->qiov, 0, ide_buffered_readv_cb, req);
670 
671     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
672     return aioreq;
673 }
674 
675 /**
676  * Cancel all pending DMA requests.
677  * Any buffered DMA requests are instantly canceled,
678  * but any pending unbuffered DMA requests must be waited on.
679  */
680 void ide_cancel_dma_sync(IDEState *s)
681 {
682     IDEBufferedRequest *req;
683 
684     /* First invoke the callbacks of all buffered requests
685      * and flag those requests as orphaned. Ideally there
686      * are no unbuffered (Scatter Gather DMA Requests or
687      * write requests) pending and we can avoid draining. */
688     QLIST_FOREACH(req, &s->buffered_requests, list) {
689         if (!req->orphaned) {
690             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
691             req->original_cb(req->original_opaque, -ECANCELED);
692         }
693         req->orphaned = true;
694     }
695 
696     /*
697      * We can't cancel Scatter Gather DMA in the middle of the
698      * operation or a partial (not full) DMA transfer would reach
699      * the storage, so we wait for completion instead (we behave
700      * as if the DMA had already completed by the time the guest
701      * tries to cancel it via bmdma_cmd_writeb with BM_CMD_START
702      * not set).
703      *
704      * In the future we'll be able to safely cancel the I/O once
705      * the whole DMA operation is submitted to disk with a single
706      * aio operation using preadv/pwritev.
707      */
708     if (s->bus->dma->aiocb) {
709         trace_ide_cancel_dma_sync_remaining();
710         blk_drain(s->blk);
711         assert(s->bus->dma->aiocb == NULL);
712     }
713 }
714 
715 static void ide_sector_read(IDEState *s);
716 
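/*
 * Completion path for PIO reads: account the I/O, advance the sector
 * and count registers, then hand the freshly filled io_buffer to the
 * guest via ide_transfer_start(), with ide_sector_read() queued as
 * the end-of-transfer callback for the next chunk.
 */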
717 static void ide_sector_read_cb(void *opaque, int ret)
718 {
719     IDEState *s = opaque;
720     int n;
721 
722     s->pio_aiocb = NULL;
723     s->status &= ~BUSY_STAT;
724 
725     if (ret == -ECANCELED) {
726         return;
727     }
728     if (ret != 0) {
729         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
730                                 IDE_RETRY_READ)) {
731             return;
732         }
733     }
734 
735     block_acct_done(blk_get_stats(s->blk), &s->acct);
736 
737     n = s->nsector;
738     if (n > s->req_nb_sectors) {
739         n = s->req_nb_sectors;
740     }
741 
742     ide_set_sector(s, ide_get_sector(s) + n);
743     s->nsector -= n;
744     /* Allow the guest to read the io_buffer */
745     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
746     ide_set_irq(s->bus);
747 }
748 
749 static void ide_sector_read(IDEState *s)
750 {
751     int64_t sector_num;
752     int n;
753 
754     s->status = READY_STAT | SEEK_STAT;
755     s->error = 0; /* not needed by IDE spec, but needed by Windows */
756     sector_num = ide_get_sector(s);
757     n = s->nsector;
758 
759     if (n == 0) {
760         ide_transfer_stop(s);
761         return;
762     }
763 
764     s->status |= BUSY_STAT;
765 
766     if (n > s->req_nb_sectors) {
767         n = s->req_nb_sectors;
768     }
769 
770     trace_ide_sector_read(sector_num, n);
771 
772     if (!ide_sect_range_ok(s, sector_num, n)) {
773         ide_rw_error(s);
774         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
775         return;
776     }
777 
778     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
779 
780     block_acct_start(blk_get_stats(s->blk), &s->acct,
781                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
782     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
783                                       ide_sector_read_cb, s);
784 }
785 
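/*
 * Account 'tx_bytes' of the current scatter/gather transfer: let the
 * controller's commit_buf hook update its own state (e.g. the BMDMA
 * PRD pointer), advance io_buffer_offset and release the sglist.
 */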
786 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
787 {
788     if (s->bus->dma->ops->commit_buf) {
789         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
790     }
791     s->io_buffer_offset += tx_bytes;
792     qemu_sglist_destroy(&s->sg);
793 }
794 
795 void ide_set_inactive(IDEState *s, bool more)
796 {
797     s->bus->dma->aiocb = NULL;
798     ide_clear_retry(s);
799     if (s->bus->dma->ops->set_inactive) {
800         s->bus->dma->ops->set_inactive(s->bus->dma, more);
801     }
802     ide_cmd_done(s);
803 }
804 
805 void ide_dma_error(IDEState *s)
806 {
807     dma_buf_commit(s, 0);
808     ide_abort_command(s);
809     ide_set_inactive(s, false);
810     ide_set_irq(s->bus);
811 }
812 
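/*
 * Apply the drive's rerror/werror policy to a failed request.  Returns
 * non-zero when the error has been handled here (reported to the guest
 * or the VM stopped for a later retry) and the caller must bail out;
 * returns zero when the policy is 'ignore' and the caller should carry
 * on as if the operation had succeeded.
 */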
813 int ide_handle_rw_error(IDEState *s, int error, int op)
814 {
815     bool is_read = (op & IDE_RETRY_READ) != 0;
816     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
817 
818     if (action == BLOCK_ERROR_ACTION_STOP) {
819         assert(s->bus->retry_unit == s->unit);
820         s->bus->error_status = op;
821     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
822         block_acct_failed(blk_get_stats(s->blk), &s->acct);
823         if (IS_IDE_RETRY_DMA(op)) {
824             ide_dma_error(s);
825         } else if (IS_IDE_RETRY_ATAPI(op)) {
826             ide_atapi_io_error(s, -error);
827         } else {
828             ide_rw_error(s);
829         }
830     }
831     blk_error_action(s->blk, action, is_read, error);
832     return action != BLOCK_ERROR_ACTION_IGNORE;
833 }
834 
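/*
 * Completion callback for ATA DMA commands: commit what was just
 * transferred, advance the sector/count registers, and either finish
 * the command or map the next run of PRDs and re-issue the remainder.
 */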
835 static void ide_dma_cb(void *opaque, int ret)
836 {
837     IDEState *s = opaque;
838     int n;
839     int64_t sector_num;
840     uint64_t offset;
841     bool stay_active = false;
842 
843     if (ret == -ECANCELED) {
844         return;
845     }
846 
847     if (ret == -EINVAL) {
848         ide_dma_error(s);
849         return;
850     }
851 
852     if (ret < 0) {
853         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
854             s->bus->dma->aiocb = NULL;
855             dma_buf_commit(s, 0);
856             return;
857         }
858     }
859 
860     n = s->io_buffer_size >> 9;
861     if (n > s->nsector) {
862         /* The PRDs were longer than needed for this request. Shorten them so
863          * we don't get a negative remainder. The Active bit must remain set
864          * after the request completes. */
865         n = s->nsector;
866         stay_active = true;
867     }
868 
869     sector_num = ide_get_sector(s);
870     if (n > 0) {
871         assert(n * 512 == s->sg.size);
872         dma_buf_commit(s, s->sg.size);
873         sector_num += n;
874         ide_set_sector(s, sector_num);
875         s->nsector -= n;
876     }
877 
878     /* end of transfer ? */
879     if (s->nsector == 0) {
880         s->status = READY_STAT | SEEK_STAT;
881         ide_set_irq(s->bus);
882         goto eot;
883     }
884 
885     /* launch next transfer */
886     n = s->nsector;
887     s->io_buffer_index = 0;
888     s->io_buffer_size = n * 512;
889     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
890         /* The PRDs were too short. Reset the Active bit, but don't raise an
891          * interrupt. */
892         s->status = READY_STAT | SEEK_STAT;
893         dma_buf_commit(s, 0);
894         goto eot;
895     }
896 
897     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
898 
899     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
900         !ide_sect_range_ok(s, sector_num, n)) {
901         ide_dma_error(s);
902         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
903         return;
904     }
905 
906     offset = sector_num << BDRV_SECTOR_BITS;
907     switch (s->dma_cmd) {
908     case IDE_DMA_READ:
909         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
910                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
911         break;
912     case IDE_DMA_WRITE:
913         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
914                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
915         break;
916     case IDE_DMA_TRIM:
917         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
918                                         &s->sg, offset, BDRV_SECTOR_SIZE,
919                                         ide_issue_trim, s, ide_dma_cb, s,
920                                         DMA_DIRECTION_TO_DEVICE);
921         break;
922     default:
923         abort();
924     }
925     return;
926 
927 eot:
928     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
929         block_acct_done(blk_get_stats(s->blk), &s->acct);
930     }
931     ide_set_inactive(s, stay_active);
932 }
933 
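/*
 * Start an ATA read/write/TRIM DMA command: flag DRQ, begin block
 * accounting for reads and writes, and hand over to the controller's
 * start_dma hook, which eventually drives ide_dma_cb().
 */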
934 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
935 {
936     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
937     s->io_buffer_size = 0;
938     s->dma_cmd = dma_cmd;
939 
940     switch (dma_cmd) {
941     case IDE_DMA_READ:
942         block_acct_start(blk_get_stats(s->blk), &s->acct,
943                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
944         break;
945     case IDE_DMA_WRITE:
946         block_acct_start(blk_get_stats(s->blk), &s->acct,
947                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
948         break;
949     default:
950         break;
951     }
952 
953     ide_start_dma(s, ide_dma_cb);
954 }
955 
956 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
957 {
958     s->io_buffer_index = 0;
959     ide_set_retry(s);
960     if (s->bus->dma->ops->start_dma) {
961         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
962     }
963 }
964 
965 static void ide_sector_write(IDEState *s);
966 
967 static void ide_sector_write_timer_cb(void *opaque)
968 {
969     IDEState *s = opaque;
970     ide_set_irq(s->bus);
971 }
972 
973 static void ide_sector_write_cb(void *opaque, int ret)
974 {
975     IDEState *s = opaque;
976     int n;
977 
978     if (ret == -ECANCELED) {
979         return;
980     }
981 
982     s->pio_aiocb = NULL;
983     s->status &= ~BUSY_STAT;
984 
985     if (ret != 0) {
986         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
987             return;
988         }
989     }
990 
991     block_acct_done(blk_get_stats(s->blk), &s->acct);
992 
993     n = s->nsector;
994     if (n > s->req_nb_sectors) {
995         n = s->req_nb_sectors;
996     }
997     s->nsector -= n;
998 
999     ide_set_sector(s, ide_get_sector(s) + n);
1000     if (s->nsector == 0) {
1001         /* no more sectors to write */
1002         ide_transfer_stop(s);
1003     } else {
1004         int n1 = s->nsector;
1005         if (n1 > s->req_nb_sectors) {
1006             n1 = s->req_nb_sectors;
1007         }
1008         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1009                            ide_sector_write);
1010     }
1011 
1012     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1013         /* It seems there is a bug in the Windows 2000 installer HDD
1014            IDE driver which fills the disk with empty logs when the
1015            IDE write IRQ comes too early. This hack tries to correct
1016            that at the expense of slower write performance. Use this
1017            option _only_ to install Windows 2000. You must disable it
1018            for normal use. */
1019         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1020                   (NANOSECONDS_PER_SECOND / 1000));
1021     } else {
1022         ide_set_irq(s->bus);
1023     }
1024 }
1025 
1026 static void ide_sector_write(IDEState *s)
1027 {
1028     int64_t sector_num;
1029     int n;
1030 
1031     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1032     sector_num = ide_get_sector(s);
1033 
1034     n = s->nsector;
1035     if (n > s->req_nb_sectors) {
1036         n = s->req_nb_sectors;
1037     }
1038 
1039     trace_ide_sector_write(sector_num, n);
1040 
1041     if (!ide_sect_range_ok(s, sector_num, n)) {
1042         ide_rw_error(s);
1043         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1044         return;
1045     }
1046 
1047     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1048 
1049     block_acct_start(blk_get_stats(s->blk), &s->acct,
1050                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1051     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1052                                    &s->qiov, 0, ide_sector_write_cb, s);
1053 }
1054 
1055 static void ide_flush_cb(void *opaque, int ret)
1056 {
1057     IDEState *s = opaque;
1058 
1059     s->pio_aiocb = NULL;
1060 
1061     if (ret == -ECANCELED) {
1062         return;
1063     }
1064     if (ret < 0) {
1065         /* XXX: What sector number to set here? */
1066         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1067             return;
1068         }
1069     }
1070 
1071     if (s->blk) {
1072         block_acct_done(blk_get_stats(s->blk), &s->acct);
1073     }
1074     s->status = READY_STAT | SEEK_STAT;
1075     ide_cmd_done(s);
1076     ide_set_irq(s->bus);
1077 }
1078 
1079 static void ide_flush_cache(IDEState *s)
1080 {
1081     if (s->blk == NULL) {
1082         ide_flush_cb(s, 0);
1083         return;
1084     }
1085 
1086     s->status |= BUSY_STAT;
1087     ide_set_retry(s);
1088     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1089     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1090 }
1091 
1092 static void ide_cfata_metadata_inquiry(IDEState *s)
1093 {
1094     uint16_t *p;
1095     uint32_t spd;
1096 
1097     p = (uint16_t *) s->io_buffer;
1098     memset(p, 0, 0x200);
1099     spd = ((s->mdata_size - 1) >> 9) + 1;
1100 
1101     put_le16(p + 0, 0x0001);			/* Data format revision */
1102     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1103     put_le16(p + 2, s->media_changed);		/* Media status */
1104     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1105     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1106     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1107     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1108 }
1109 
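/*
 * For metadata reads and writes the guest places a 512-byte block
 * index in hcyl:lcyl (converted to a byte offset with << 9); the copy
 * is clipped to the remaining metadata, the requested sector count,
 * and one sector minus the 2-byte status header.
 */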
1110 static void ide_cfata_metadata_read(IDEState *s)
1111 {
1112     uint16_t *p;
1113 
1114     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1115         s->status = ERR_STAT;
1116         s->error = ABRT_ERR;
1117         return;
1118     }
1119 
1120     p = (uint16_t *) s->io_buffer;
1121     memset(p, 0, 0x200);
1122 
1123     put_le16(p + 0, s->media_changed);		/* Media status */
1124     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1125                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1126                                     s->nsector << 9), 0x200 - 2));
1127 }
1128 
1129 static void ide_cfata_metadata_write(IDEState *s)
1130 {
1131     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1132         s->status = ERR_STAT;
1133         s->error = ABRT_ERR;
1134         return;
1135     }
1136 
1137     s->media_changed = 0;
1138 
1139     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1140                     s->io_buffer + 2,
1141                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1142                                     s->nsector << 9), 0x200 - 2));
1143 }
1144 
1145 /* called when the inserted state of the media has changed */
1146 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1147 {
1148     IDEState *s = opaque;
1149     uint64_t nb_sectors;
1150 
1151     s->tray_open = !load;
1152     blk_get_geometry(s->blk, &nb_sectors);
1153     s->nb_sectors = nb_sectors;
1154 
1155     /*
1156      * First indicate to the guest that a CD has been removed.  That's
1157      * done on the next command the guest sends us.
1158      *
1159      * Then we set UNIT_ATTENTION, by which the guest will
1160      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1161      */
1162     s->cdrom_changed = 1;
1163     s->events.new_media = true;
1164     s->events.eject_request = false;
1165     ide_set_irq(s->bus);
1166 }
1167 
1168 static void ide_cd_eject_request_cb(void *opaque, bool force)
1169 {
1170     IDEState *s = opaque;
1171 
1172     s->events.eject_request = true;
1173     if (force) {
1174         s->tray_locked = false;
1175     }
1176     ide_set_irq(s->bus);
1177 }
1178 
1179 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1180 {
1181     s->lba48 = lba48;
1182 
1183     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1184      * fiddling with the rest of the read logic, we just store the
1185      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1186      */
1187     if (!s->lba48) {
1188         if (!s->nsector)
1189             s->nsector = 256;
1190     } else {
1191         if (!s->nsector && !s->hob_nsector)
1192             s->nsector = 65536;
1193         else {
1194             int lo = s->nsector;
1195             int hi = s->hob_nsector;
1196 
1197             s->nsector = (hi << 8) | lo;
1198         }
1199     }
1200 }
1201 
1202 static void ide_clear_hob(IDEBus *bus)
1203 {
1204     /* any write clears HOB high bit of device control register */
1205     bus->ifs[0].select &= ~(1 << 7);
1206     bus->ifs[1].select &= ~(1 << 7);
1207 }
1208 
1209 /* IOport [W]rite [R]egisters */
1210 enum ATA_IOPORT_WR {
1211     ATA_IOPORT_WR_DATA = 0,
1212     ATA_IOPORT_WR_FEATURES = 1,
1213     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1214     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1215     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1216     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1217     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1218     ATA_IOPORT_WR_COMMAND = 7,
1219     ATA_IOPORT_WR_NUM_REGISTERS,
1220 };
1221 
1222 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1223     [ATA_IOPORT_WR_DATA] = "Data",
1224     [ATA_IOPORT_WR_FEATURES] = "Features",
1225     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1226     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1227     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1228     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1229     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1230     [ATA_IOPORT_WR_COMMAND] = "Command"
1231 };
1232 
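/*
 * Task-file register writes go to both drives on the bus.  Each write
 * first shifts the previous register value into its hob_* shadow; this
 * is how LBA48 commands supply 48-bit parameters with two successive
 * writes per register.
 */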
1233 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1234 {
1235     IDEBus *bus = opaque;
1236     IDEState *s = idebus_active_if(bus);
1237     int reg_num = addr & 7;
1238 
1239     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1240 
1241     /* ignore writes to command block while busy with previous command */
1242     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1243         return;
1244     }
1245 
1246     switch (reg_num) {
1247     case 0:
1248         break;
1249     case ATA_IOPORT_WR_FEATURES:
1250         ide_clear_hob(bus);
1251         /* NOTE: data is written to the two drives */
1252         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1253         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1254         bus->ifs[0].feature = val;
1255         bus->ifs[1].feature = val;
1256         break;
1257     case ATA_IOPORT_WR_SECTOR_COUNT:
1258         ide_clear_hob(bus);
1259         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1260         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1261         bus->ifs[0].nsector = val;
1262         bus->ifs[1].nsector = val;
1263         break;
1264     case ATA_IOPORT_WR_SECTOR_NUMBER:
1265         ide_clear_hob(bus);
1266         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1267         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1268         bus->ifs[0].sector = val;
1269         bus->ifs[1].sector = val;
1270         break;
1271     case ATA_IOPORT_WR_CYLINDER_LOW:
1272         ide_clear_hob(bus);
1273         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1274         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1275         bus->ifs[0].lcyl = val;
1276         bus->ifs[1].lcyl = val;
1277         break;
1278     case ATA_IOPORT_WR_CYLINDER_HIGH:
1279         ide_clear_hob(bus);
1280         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1281         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1282         bus->ifs[0].hcyl = val;
1283         bus->ifs[1].hcyl = val;
1284         break;
1285     case ATA_IOPORT_WR_DEVICE_HEAD:
1286         /* FIXME: HOB readback uses bit 7 */
1287         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1288         bus->ifs[1].select = (val | 0x10) | 0xa0;
1289         /* select drive */
1290         bus->unit = (val >> 4) & 1;
1291         break;
1292     default:
1293     case ATA_IOPORT_WR_COMMAND:
1294         /* command */
1295         ide_exec_cmd(bus, val);
1296         break;
1297     }
1298 }
1299 
1300 static void ide_reset(IDEState *s)
1301 {
1302     trace_ide_reset(s);
1303 
1304     if (s->pio_aiocb) {
1305         blk_aio_cancel(s->pio_aiocb);
1306         s->pio_aiocb = NULL;
1307     }
1308 
1309     if (s->drive_kind == IDE_CFATA)
1310         s->mult_sectors = 0;
1311     else
1312         s->mult_sectors = MAX_MULT_SECTORS;
1313     /* ide regs */
1314     s->feature = 0;
1315     s->error = 0;
1316     s->nsector = 0;
1317     s->sector = 0;
1318     s->lcyl = 0;
1319     s->hcyl = 0;
1320 
1321     /* lba48 */
1322     s->hob_feature = 0;
1323     s->hob_sector = 0;
1324     s->hob_nsector = 0;
1325     s->hob_lcyl = 0;
1326     s->hob_hcyl = 0;
1327 
1328     s->select = 0xa0;
1329     s->status = READY_STAT | SEEK_STAT;
1330 
1331     s->lba48 = 0;
1332 
1333     /* ATAPI specific */
1334     s->sense_key = 0;
1335     s->asc = 0;
1336     s->cdrom_changed = 0;
1337     s->packet_transfer_size = 0;
1338     s->elementary_transfer_size = 0;
1339     s->io_buffer_index = 0;
1340     s->cd_sector_size = 0;
1341     s->atapi_dma = 0;
1342     s->tray_locked = 0;
1343     s->tray_open = 0;
1344     /* ATA DMA state */
1345     s->io_buffer_size = 0;
1346     s->req_nb_sectors = 0;
1347 
1348     ide_set_signature(s);
1349     /* init the transfer handler so that 0xffff is returned on data
1350        accesses */
1351     s->end_transfer_func = ide_dummy_transfer_stop;
1352     ide_dummy_transfer_stop(s);
1353     s->media_changed = 0;
1354 }
1355 
1356 static bool cmd_nop(IDEState *s, uint8_t cmd)
1357 {
1358     return true;
1359 }
1360 
1361 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1362 {
1363     /* Halt PIO (in the DRQ phase), then DMA */
1364     ide_transfer_halt(s);
1365     ide_cancel_dma_sync(s);
1366 
1367     /* Reset any PIO commands, reset signature, etc */
1368     ide_reset(s);
1369 
1370     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1371      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1372     s->status = 0x00;
1373 
1374     /* Do not overwrite status register */
1375     return false;
1376 }
1377 
1378 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1379 {
1380     switch (s->feature) {
1381     case DSM_TRIM:
1382         if (s->blk) {
1383             ide_sector_start_dma(s, IDE_DMA_TRIM);
1384             return false;
1385         }
1386         break;
1387     }
1388 
1389     ide_abort_command(s);
1390     return true;
1391 }
1392 
1393 static bool cmd_identify(IDEState *s, uint8_t cmd)
1394 {
1395     if (s->blk && s->drive_kind != IDE_CD) {
1396         if (s->drive_kind != IDE_CFATA) {
1397             ide_identify(s);
1398         } else {
1399             ide_cfata_identify(s);
1400         }
1401         s->status = READY_STAT | SEEK_STAT;
1402         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1403         ide_set_irq(s->bus);
1404         return false;
1405     } else {
1406         if (s->drive_kind == IDE_CD) {
1407             ide_set_signature(s);
1408         }
1409         ide_abort_command(s);
1410     }
1411 
1412     return true;
1413 }
1414 
1415 static bool cmd_verify(IDEState *s, uint8_t cmd)
1416 {
1417     bool lba48 = (cmd == WIN_VERIFY_EXT);
1418 
1419     /* do sector number check ? */
1420     ide_cmd_lba48_transform(s, lba48);
1421 
1422     return true;
1423 }
1424 
1425 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1426 {
1427     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1428         /* Disable Read and Write Multiple */
1429         s->mult_sectors = 0;
1430     } else if ((s->nsector & 0xff) != 0 &&
1431         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1432          (s->nsector & (s->nsector - 1)) != 0)) {
1433         ide_abort_command(s);
1434     } else {
1435         s->mult_sectors = s->nsector & 0xff;
1436     }
1437 
1438     return true;
1439 }
1440 
1441 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1442 {
1443     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1444 
1445     if (!s->blk || !s->mult_sectors) {
1446         ide_abort_command(s);
1447         return true;
1448     }
1449 
1450     ide_cmd_lba48_transform(s, lba48);
1451     s->req_nb_sectors = s->mult_sectors;
1452     ide_sector_read(s);
1453     return false;
1454 }
1455 
1456 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1457 {
1458     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1459     int n;
1460 
1461     if (!s->blk || !s->mult_sectors) {
1462         ide_abort_command(s);
1463         return true;
1464     }
1465 
1466     ide_cmd_lba48_transform(s, lba48);
1467 
1468     s->req_nb_sectors = s->mult_sectors;
1469     n = MIN(s->nsector, s->req_nb_sectors);
1470 
1471     s->status = SEEK_STAT | READY_STAT;
1472     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1473 
1474     s->media_changed = 1;
1475 
1476     return false;
1477 }
1478 
1479 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1480 {
1481     bool lba48 = (cmd == WIN_READ_EXT);
1482 
1483     if (s->drive_kind == IDE_CD) {
1484         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1485         ide_abort_command(s);
1486         return true;
1487     }
1488 
1489     if (!s->blk) {
1490         ide_abort_command(s);
1491         return true;
1492     }
1493 
1494     ide_cmd_lba48_transform(s, lba48);
1495     s->req_nb_sectors = 1;
1496     ide_sector_read(s);
1497 
1498     return false;
1499 }
1500 
1501 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1502 {
1503     bool lba48 = (cmd == WIN_WRITE_EXT);
1504 
1505     if (!s->blk) {
1506         ide_abort_command(s);
1507         return true;
1508     }
1509 
1510     ide_cmd_lba48_transform(s, lba48);
1511 
1512     s->req_nb_sectors = 1;
1513     s->status = SEEK_STAT | READY_STAT;
1514     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1515 
1516     s->media_changed = 1;
1517 
1518     return false;
1519 }
1520 
1521 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1522 {
1523     bool lba48 = (cmd == WIN_READDMA_EXT);
1524 
1525     if (!s->blk) {
1526         ide_abort_command(s);
1527         return true;
1528     }
1529 
1530     ide_cmd_lba48_transform(s, lba48);
1531     ide_sector_start_dma(s, IDE_DMA_READ);
1532 
1533     return false;
1534 }
1535 
1536 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1537 {
1538     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1539 
1540     if (!s->blk) {
1541         ide_abort_command(s);
1542         return true;
1543     }
1544 
1545     ide_cmd_lba48_transform(s, lba48);
1546     ide_sector_start_dma(s, IDE_DMA_WRITE);
1547 
1548     s->media_changed = 1;
1549 
1550     return false;
1551 }
1552 
1553 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1554 {
1555     ide_flush_cache(s);
1556     return false;
1557 }
1558 
1559 static bool cmd_seek(IDEState *s, uint8_t cmd)
1560 {
1561     /* XXX: Check that seek is within bounds */
1562     return true;
1563 }
1564 
1565 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1566 {
1567     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1568 
1569     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1570     if (s->nb_sectors == 0) {
1571         ide_abort_command(s);
1572         return true;
1573     }
1574 
1575     ide_cmd_lba48_transform(s, lba48);
1576     ide_set_sector(s, s->nb_sectors - 1);
1577 
1578     return true;
1579 }
1580 
1581 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1582 {
1583     s->nsector = 0xff; /* device active or idle */
1584     return true;
1585 }
1586 
1587 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1588 {
1589     uint16_t *identify_data;
1590 
1591     if (!s->blk) {
1592         ide_abort_command(s);
1593         return true;
1594     }
1595 
1596     /* XXX: valid for CDROM ? */
1597     switch (s->feature) {
1598     case 0x02: /* write cache enable */
1599         blk_set_enable_write_cache(s->blk, true);
1600         identify_data = (uint16_t *)s->identify_data;
1601         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1602         return true;
1603     case 0x82: /* write cache disable */
1604         blk_set_enable_write_cache(s->blk, false);
1605         identify_data = (uint16_t *)s->identify_data;
1606         put_le16(identify_data + 85, (1 << 14) | 1);
1607         ide_flush_cache(s);
1608         return false;
1609     case 0xcc: /* reverting to power-on defaults enable */
1610     case 0x66: /* reverting to power-on defaults disable */
1611     case 0xaa: /* read look-ahead enable */
1612     case 0x55: /* read look-ahead disable */
1613     case 0x05: /* set advanced power management mode */
1614     case 0x85: /* disable advanced power management mode */
1615     case 0x69: /* NOP */
1616     case 0x67: /* NOP */
1617     case 0x96: /* NOP */
1618     case 0x9a: /* NOP */
1619     case 0x42: /* enable Automatic Acoustic Mode */
1620     case 0xc2: /* disable Automatic Acoustic Mode */
1621         return true;
1622     case 0x03: /* set transfer mode */
1623         {
1624             uint8_t val = s->nsector & 0x07;
1625             identify_data = (uint16_t *)s->identify_data;
1626 
1627             switch (s->nsector >> 3) {
1628             case 0x00: /* pio default */
1629             case 0x01: /* pio mode */
1630                 put_le16(identify_data + 62, 0x07);
1631                 put_le16(identify_data + 63, 0x07);
1632                 put_le16(identify_data + 88, 0x3f);
1633                 break;
1634             case 0x02: /* single word dma mode */
1635                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1636                 put_le16(identify_data + 63, 0x07);
1637                 put_le16(identify_data + 88, 0x3f);
1638                 break;
1639             case 0x04: /* mdma mode */
1640                 put_le16(identify_data + 62, 0x07);
1641                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1642                 put_le16(identify_data + 88, 0x3f);
1643                 break;
1644             case 0x08: /* udma mode */
1645                 put_le16(identify_data + 62, 0x07);
1646                 put_le16(identify_data + 63, 0x07);
1647                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1648                 break;
1649             default:
1650                 goto abort_cmd;
1651             }
1652             return true;
1653         }
1654     }
1655 
1656 abort_cmd:
1657     ide_abort_command(s);
1658     return true;
1659 }
1660 
1661 
1662 /*** ATAPI commands ***/
1663 
1664 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1665 {
1666     ide_atapi_identify(s);
1667     s->status = READY_STAT | SEEK_STAT;
1668     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1669     ide_set_irq(s->bus);
1670     return false;
1671 }
1672 
1673 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1674 {
1675     ide_set_signature(s);
1676 
1677     if (s->drive_kind == IDE_CD) {
1678         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1679                         * devices to return a clear status register
1680                         * with READY_STAT *not* set. */
1681         s->error = 0x01;
1682     } else {
1683         s->status = READY_STAT | SEEK_STAT;
1684         /* The bits of the error register are not as usual for this command!
1685          * They are part of the regular output (this is why ERR_STAT isn't set)
1686          * Device 0 passed, Device 1 passed or not present. */
1687         s->error = 0x01;
1688         ide_set_irq(s->bus);
1689     }
1690 
1691     return false;
1692 }
1693 
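/* PACKET (A0h): start an ATAPI command. The command packet is received by PIO
 * into io_buffer and handled by ide_atapi_cmd(); Features bit 0 selects DMA
 * for the data phase, Features bit 1 (overlapped) is not supported. */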
1694 static bool cmd_packet(IDEState *s, uint8_t cmd)
1695 {
1696     /* overlapping commands not supported */
1697     if (s->feature & 0x02) {
1698         ide_abort_command(s);
1699         return true;
1700     }
1701 
1702     s->status = READY_STAT | SEEK_STAT;
1703     s->atapi_dma = s->feature & 1;
1704     if (s->atapi_dma) {
1705         s->dma_cmd = IDE_DMA_ATAPI;
1706     }
1707     s->nsector = 1;
1708     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1709                        ide_atapi_cmd);
1710     return false;
1711 }
1712 
1713 
1714 /*** CF-ATA commands ***/
1715 
1716 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1717 {
1718     s->error = 0x09;    /* miscellaneous error */
1719     s->status = READY_STAT | SEEK_STAT;
1720     ide_set_irq(s->bus);
1721 
1722     return false;
1723 }
1724 
1725 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1726 {
1727     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1728      * required for Windows 8 to work with AHCI */
1729 
1730     if (cmd == CFA_WEAR_LEVEL) {
1731         s->nsector = 0;
1732     }
1733 
1734     if (cmd == CFA_ERASE_SECTORS) {
1735         s->media_changed = 1;
1736     }
1737 
1738     return true;
1739 }
1740 
1741 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1742 {
1743     s->status = READY_STAT | SEEK_STAT;
1744 
1745     memset(s->io_buffer, 0, 0x200);
1746     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1747     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1748     s->io_buffer[0x02] = s->select;                 /* Head */
1749     s->io_buffer[0x03] = s->sector;                 /* Sector */
1750     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1751     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1752     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1753     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1754     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1755     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1756     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1757 
1758     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1759     ide_set_irq(s->bus);
1760 
1761     return false;
1762 }
1763 
1764 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1765 {
1766     switch (s->feature) {
1767     case 0x02:  /* Inquiry Metadata Storage */
1768         ide_cfata_metadata_inquiry(s);
1769         break;
1770     case 0x03:  /* Read Metadata Storage */
1771         ide_cfata_metadata_read(s);
1772         break;
1773     case 0x04:  /* Write Metadata Storage */
1774         ide_cfata_metadata_write(s);
1775         break;
1776     default:
1777         ide_abort_command(s);
1778         return true;
1779     }
1780 
1781     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1782     s->status = 0x00; /* NOTE: READY is _not_ set */
1783     ide_set_irq(s->bus);
1784 
1785     return false;
1786 }
1787 
1788 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1789 {
1790     switch (s->feature) {
1791     case 0x01:  /* sense temperature in device */
1792         s->nsector = 0x50;      /* +20 C */
1793         break;
1794     default:
1795         ide_abort_command(s);
1796         return true;
1797     }
1798 
1799     return true;
1800 }
1801 
1802 
1803 /*** SMART commands ***/
1804 
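/* SMART (B0h): the subcommand is taken from the Features register. It is only
 * accepted with the SMART signature (lcyl=0x4f, hcyl=0xc2) in place and, apart
 * from SMART ENABLE, only while SMART is enabled. */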
1805 static bool cmd_smart(IDEState *s, uint8_t cmd)
1806 {
1807     int n;
1808 
1809     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1810         goto abort_cmd;
1811     }
1812 
1813     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1814         goto abort_cmd;
1815     }
1816 
1817     switch (s->feature) {
1818     case SMART_DISABLE:
1819         s->smart_enabled = 0;
1820         return true;
1821 
1822     case SMART_ENABLE:
1823         s->smart_enabled = 1;
1824         return true;
1825 
1826     case SMART_ATTR_AUTOSAVE:
1827         switch (s->sector) {
1828         case 0x00:
1829             s->smart_autosave = 0;
1830             break;
1831         case 0xf1:
1832             s->smart_autosave = 1;
1833             break;
1834         default:
1835             goto abort_cmd;
1836         }
1837         return true;
1838 
1839     case SMART_STATUS:
1840         if (!s->smart_errors) {
1841             s->hcyl = 0xc2;
1842             s->lcyl = 0x4f;
1843         } else {
1844             s->hcyl = 0x2c;
1845             s->lcyl = 0xf4;
1846         }
1847         return true;
1848 
1849     case SMART_READ_THRESH:
1850         memset(s->io_buffer, 0, 0x200);
1851         s->io_buffer[0] = 0x01; /* smart struct version */
1852 
1853         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1854             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1855             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1856         }
1857 
1858         /* checksum */
1859         for (n = 0; n < 511; n++) {
1860             s->io_buffer[511] += s->io_buffer[n];
1861         }
1862         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1863 
1864         s->status = READY_STAT | SEEK_STAT;
1865         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1866         ide_set_irq(s->bus);
1867         return false;
1868 
1869     case SMART_READ_DATA:
1870         memset(s->io_buffer, 0, 0x200);
1871         s->io_buffer[0] = 0x01; /* smart struct version */
1872 
1873         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1874             int i;
1875             for (i = 0; i < 11; i++) {
1876                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1877             }
1878         }
1879 
1880         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1881         if (s->smart_selftest_count == 0) {
1882             s->io_buffer[363] = 0;
1883         } else {
1884             s->io_buffer[363] =
1885                 s->smart_selftest_data[3 +
1886                            (s->smart_selftest_count - 1) *
1887                            24];
1888         }
1889         s->io_buffer[364] = 0x20;
1890         s->io_buffer[365] = 0x01;
1891         /* offline data collection capability: execute + self-test */
1892         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1893         s->io_buffer[368] = 0x03; /* smart capability (1) */
1894         s->io_buffer[369] = 0x00; /* smart capability (2) */
1895         s->io_buffer[370] = 0x01; /* error logging supported */
1896         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1897         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1898         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1899 
1900         for (n = 0; n < 511; n++) {
1901             s->io_buffer[511] += s->io_buffer[n];
1902         }
1903         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1904 
1905         s->status = READY_STAT | SEEK_STAT;
1906         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1907         ide_set_irq(s->bus);
1908         return false;
1909 
1910     case SMART_READ_LOG:
1911         switch (s->sector) {
1912         case 0x01: /* summary smart error log */
1913             memset(s->io_buffer, 0, 0x200);
1914             s->io_buffer[0] = 0x01;
1915             s->io_buffer[1] = 0x00; /* no error entries */
1916             s->io_buffer[452] = s->smart_errors & 0xff;
1917             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1918 
1919             for (n = 0; n < 511; n++) {
1920                 s->io_buffer[511] += s->io_buffer[n];
1921             }
1922             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1923             break;
1924         case 0x06: /* smart self test log */
1925             memset(s->io_buffer, 0, 0x200);
1926             s->io_buffer[0] = 0x01;
1927             if (s->smart_selftest_count == 0) {
1928                 s->io_buffer[508] = 0;
1929             } else {
1930                 s->io_buffer[508] = s->smart_selftest_count;
1931                 for (n = 2; n < 506; n++) {
1932                     s->io_buffer[n] = s->smart_selftest_data[n];
1933                 }
1934             }
1935 
1936             for (n = 0; n < 511; n++) {
1937                 s->io_buffer[511] += s->io_buffer[n];
1938             }
1939             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1940             break;
1941         default:
1942             goto abort_cmd;
1943         }
1944         s->status = READY_STAT | SEEK_STAT;
1945         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1946         ide_set_irq(s->bus);
1947         return false;
1948 
1949     case SMART_EXECUTE_OFFLINE:
1950         switch (s->sector) {
1951         case 0: /* off-line routine */
1952         case 1: /* short self test */
1953         case 2: /* extended self test */
1954             s->smart_selftest_count++;
1955             if (s->smart_selftest_count > 21) {
1956                 s->smart_selftest_count = 1;
1957             }
1958             n = 2 + (s->smart_selftest_count - 1) * 24;
1959             s->smart_selftest_data[n] = s->sector;
1960             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1961             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1962             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1963             break;
1964         default:
1965             goto abort_cmd;
1966         }
1967         return true;
1968     }
1969 
1970 abort_cmd:
1971     ide_abort_command(s);
1972     return true;
1973 }
1974 
1975 #define HD_OK (1u << IDE_HD)
1976 #define CD_OK (1u << IDE_CD)
1977 #define CFA_OK (1u << IDE_CFATA)
1978 #define HD_CFA_OK (HD_OK | CFA_OK)
1979 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1980 
1981 /* Set the Disk Seek Completed status bit during completion */
1982 #define SET_DSC (1u << 8)
1983 
1984 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1985 static const struct {
1986     /* Returns true if the completion code should be run */
1987     bool (*handler)(IDEState *s, uint8_t cmd);
1988     int flags;
1989 } ide_cmd_table[0x100] = {
1990     /* NOP not implemented, mandatory for CD */
1991     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1992     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1993     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1994     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1995     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1996     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1997     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1998     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1999     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2000     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2001     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2002     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2003     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2004     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2005     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2006     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2007     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2008     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2009     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2010     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2011     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2012     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2013     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2014     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2015     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2016     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2017     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2018     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2019     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2020     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2021     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2022     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2023     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2024     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2025     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2026     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2027     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2028     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2029     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2030     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2031     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2032     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2033     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2034     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2035     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2036     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2037     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2038     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2039     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2040     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2041     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2042     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2043     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2044     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2045     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2046     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2047 };
2048 
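/* A command is permitted if its table entry has the flag bit for the active
 * drive kind (HD, CD or CFATA) set. */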
2049 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2050 {
2051     return cmd < ARRAY_SIZE(ide_cmd_table)
2052         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2053 }
2054 
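/* Execute the byte written to the Command register: run the handler for the
 * active drive and, if it reports synchronous completion, clear BSY, set DSC
 * where the table asks for it, and raise the completion interrupt. */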
2055 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2056 {
2057     IDEState *s;
2058     bool complete;
2059 
2060     s = idebus_active_if(bus);
2061     trace_ide_exec_cmd(bus, s, val);
2062 
2063     /* ignore commands to non-existent slave */
2064     if (s != bus->ifs && !s->blk) {
2065         return;
2066     }
2067 
2068     /* Only RESET is allowed while BSY and/or DRQ are set,
2069      * and only to ATAPI devices. */
2070     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2071         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2072             return;
2073         }
2074     }
2075 
2076     if (!ide_cmd_permitted(s, val)) {
2077         ide_abort_command(s);
2078         ide_set_irq(s->bus);
2079         return;
2080     }
2081 
2082     s->status = READY_STAT | BUSY_STAT;
2083     s->error = 0;
2084     s->io_buffer_offset = 0;
2085 
2086     complete = ide_cmd_table[val].handler(s, val);
2087     if (complete) {
2088         s->status &= ~BUSY_STAT;
2089         assert(!!s->error == !!(s->status & ERR_STAT));
2090 
2091         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2092             s->status |= SEEK_STAT;
2093         }
2094 
2095         ide_cmd_done(s);
2096         ide_set_irq(s->bus);
2097     }
2098 }
2099 
2100 /* IOport [R]ead [R]egisters */
2101 enum ATA_IOPORT_RR {
2102     ATA_IOPORT_RR_DATA = 0,
2103     ATA_IOPORT_RR_ERROR = 1,
2104     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2105     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2106     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2107     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2108     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2109     ATA_IOPORT_RR_STATUS = 7,
2110     ATA_IOPORT_RR_NUM_REGISTERS,
2111 };
2112 
2113 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2114     [ATA_IOPORT_RR_DATA] = "Data",
2115     [ATA_IOPORT_RR_ERROR] = "Error",
2116     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2117     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2118     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2119     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2120     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2121     [ATA_IOPORT_RR_STATUS] = "Status"
2122 };
2123 
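/* Read one of the eight task-file registers. Absent drives read as zero;
 * reading the Status register also deasserts the pending IRQ. */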
2124 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2125 {
2126     IDEBus *bus = opaque;
2127     IDEState *s = idebus_active_if(bus);
2128     uint32_t reg_num;
2129     int ret, hob;
2130 
2131     reg_num = addr & 7;
2132     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2133     //hob = s->select & (1 << 7);
2134     hob = 0;
2135     switch (reg_num) {
2136     case ATA_IOPORT_RR_DATA:
2137         ret = 0xff;
2138         break;
2139     case ATA_IOPORT_RR_ERROR:
2140         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2141             (s != bus->ifs && !s->blk)) {
2142             ret = 0;
2143         } else if (!hob) {
2144             ret = s->error;
2145         } else {
2146             ret = s->hob_feature;
2147         }
2148         break;
2149     case ATA_IOPORT_RR_SECTOR_COUNT:
2150         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2151             ret = 0;
2152         } else if (!hob) {
2153             ret = s->nsector & 0xff;
2154         } else {
2155             ret = s->hob_nsector;
2156         }
2157         break;
2158     case ATA_IOPORT_RR_SECTOR_NUMBER:
2159         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2160             ret = 0;
2161         } else if (!hob) {
2162             ret = s->sector;
2163         } else {
2164             ret = s->hob_sector;
2165         }
2166         break;
2167     case ATA_IOPORT_RR_CYLINDER_LOW:
2168         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2169             ret = 0;
2170         } else if (!hob) {
2171             ret = s->lcyl;
2172         } else {
2173             ret = s->hob_lcyl;
2174         }
2175         break;
2176     case ATA_IOPORT_RR_CYLINDER_HIGH:
2177         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2178             ret = 0;
2179         } else if (!hob) {
2180             ret = s->hcyl;
2181         } else {
2182             ret = s->hob_hcyl;
2183         }
2184         break;
2185     case ATA_IOPORT_RR_DEVICE_HEAD:
2186         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2187             ret = 0;
2188         } else {
2189             ret = s->select;
2190         }
2191         break;
2192     default:
2193     case ATA_IOPORT_RR_STATUS:
2194         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2195             (s != bus->ifs && !s->blk)) {
2196             ret = 0;
2197         } else {
2198             ret = s->status;
2199         }
2200         qemu_irq_lower(bus->irq);
2201         break;
2202     }
2203 
2204     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2205     return ret;
2206 }
2207 
2208 uint32_t ide_status_read(void *opaque, uint32_t addr)
2209 {
2210     IDEBus *bus = opaque;
2211     IDEState *s = idebus_active_if(bus);
2212     int ret;
2213 
2214     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2215         (s != bus->ifs && !s->blk)) {
2216         ret = 0;
2217     } else {
2218         ret = s->status;
2219     }
2220 
2221     trace_ide_status_read(addr, ret, bus, s);
2222     return ret;
2223 }
2224 
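/* Write to the Device Control register: a rising edge on the SRST bit starts
 * a software reset of both drives, the falling edge completes it and restores
 * the drive signatures. */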
2225 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2226 {
2227     IDEBus *bus = opaque;
2228     IDEState *s;
2229     int i;
2230 
2231     trace_ide_cmd_write(addr, val, bus);
2232 
2233     /* common for both drives */
2234     if (!(bus->cmd & IDE_CMD_RESET) &&
2235         (val & IDE_CMD_RESET)) {
2236         /* reset low to high */
2237         for (i = 0; i < 2; i++) {
2238             s = &bus->ifs[i];
2239             s->status = BUSY_STAT | SEEK_STAT;
2240             s->error = 0x01;
2241         }
2242     } else if ((bus->cmd & IDE_CMD_RESET) &&
2243                !(val & IDE_CMD_RESET)) {
2244         /* high to low */
2245         for (i = 0; i < 2; i++) {
2246             s = &bus->ifs[i];
2247             if (s->drive_kind == IDE_CD)
2248                 s->status = 0x00; /* NOTE: READY is _not_ set */
2249             else
2250                 s->status = READY_STAT | SEEK_STAT;
2251             ide_set_signature(s);
2252         }
2253     }
2254 
2255     bus->cmd = val;
2256 }
2257 
2258 /*
2259  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2260  * transferred from the device to the guest), false if it's a PIO in
2261  */
2262 static bool ide_is_pio_out(IDEState *s)
2263 {
2264     if (s->end_transfer_func == ide_sector_write ||
2265         s->end_transfer_func == ide_atapi_cmd) {
2266         return false;
2267     } else if (s->end_transfer_func == ide_sector_read ||
2268                s->end_transfer_func == ide_transfer_stop ||
2269                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2270                s->end_transfer_func == ide_dummy_transfer_stop) {
2271         return true;
2272     }
2273 
2274     abort();
2275 }
2276 
2277 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2278 {
2279     IDEBus *bus = opaque;
2280     IDEState *s = idebus_active_if(bus);
2281     uint8_t *p;
2282 
2283     trace_ide_data_writew(addr, val, bus, s);
2284 
2285     /* PIO data access allowed only when DRQ bit is set. The result of a write
2286      * during PIO out is indeterminate, just ignore it. */
2287     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2288         return;
2289     }
2290 
2291     p = s->data_ptr;
2292     if (p + 2 > s->data_end) {
2293         return;
2294     }
2295 
2296     *(uint16_t *)p = le16_to_cpu(val);
2297     p += 2;
2298     s->data_ptr = p;
2299     if (p >= s->data_end) {
2300         s->status &= ~DRQ_STAT;
2301         s->end_transfer_func(s);
2302     }
2303 }
2304 
2305 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2306 {
2307     IDEBus *bus = opaque;
2308     IDEState *s = idebus_active_if(bus);
2309     uint8_t *p;
2310     int ret;
2311 
2312     /* PIO data access allowed only when DRQ bit is set. The result of a read
2313      * during PIO in is indeterminate, return 0 and don't move forward. */
2314     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2315         return 0;
2316     }
2317 
2318     p = s->data_ptr;
2319     if (p + 2 > s->data_end) {
2320         return 0;
2321     }
2322 
2323     ret = cpu_to_le16(*(uint16_t *)p);
2324     p += 2;
2325     s->data_ptr = p;
2326     if (p >= s->data_end) {
2327         s->status &= ~DRQ_STAT;
2328         s->end_transfer_func(s);
2329     }
2330 
2331     trace_ide_data_readw(addr, ret, bus, s);
2332     return ret;
2333 }
2334 
2335 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2336 {
2337     IDEBus *bus = opaque;
2338     IDEState *s = idebus_active_if(bus);
2339     uint8_t *p;
2340 
2341     trace_ide_data_writel(addr, val, bus, s);
2342 
2343     /* PIO data access allowed only when DRQ bit is set. The result of a write
2344      * during PIO out is indeterminate, just ignore it. */
2345     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2346         return;
2347     }
2348 
2349     p = s->data_ptr;
2350     if (p + 4 > s->data_end) {
2351         return;
2352     }
2353 
2354     *(uint32_t *)p = le32_to_cpu(val);
2355     p += 4;
2356     s->data_ptr = p;
2357     if (p >= s->data_end) {
2358         s->status &= ~DRQ_STAT;
2359         s->end_transfer_func(s);
2360     }
2361 }
2362 
2363 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2364 {
2365     IDEBus *bus = opaque;
2366     IDEState *s = idebus_active_if(bus);
2367     uint8_t *p;
2368     int ret;
2369 
2370     /* PIO data access allowed only when DRQ bit is set. The result of a read
2371      * during PIO in is indeterminate, return 0 and don't move forward. */
2372     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2373         ret = 0;
2374         goto out;
2375     }
2376 
2377     p = s->data_ptr;
2378     if (p + 4 > s->data_end) {
2379         return 0;
2380     }
2381 
2382     ret = cpu_to_le32(*(uint32_t *)p);
2383     p += 4;
2384     s->data_ptr = p;
2385     if (p >= s->data_end) {
2386         s->status &= ~DRQ_STAT;
2387         s->end_transfer_func(s);
2388     }
2389 
2390 out:
2391     trace_ide_data_readl(addr, ret, bus, s);
2392     return ret;
2393 }
2394 
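/* End a dummy transfer: point the data pointers back at io_buffer and fill
 * its first four bytes with 0xff. */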
2395 static void ide_dummy_transfer_stop(IDEState *s)
2396 {
2397     s->data_ptr = s->io_buffer;
2398     s->data_end = s->io_buffer;
2399     s->io_buffer[0] = 0xff;
2400     s->io_buffer[1] = 0xff;
2401     s->io_buffer[2] = 0xff;
2402     s->io_buffer[3] = 0xff;
2403 }
2404 
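/* Reset both drives on the bus, cancel any in-flight DMA request and reset
 * the DMA provider itself. */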
2405 void ide_bus_reset(IDEBus *bus)
2406 {
2407     bus->unit = 0;
2408     bus->cmd = 0;
2409     ide_reset(&bus->ifs[0]);
2410     ide_reset(&bus->ifs[1]);
2411     ide_clear_hob(bus);
2412 
2413     /* pending async DMA */
2414     if (bus->dma->aiocb) {
2415         trace_ide_bus_reset_aio();
2416         blk_aio_cancel(bus->dma->aiocb);
2417         bus->dma->aiocb = NULL;
2418     }
2419 
2420     /* reset dma provider too */
2421     if (bus->dma->ops->reset) {
2422         bus->dma->ops->reset(bus->dma);
2423     }
2424 }
2425 
2426 static bool ide_cd_is_tray_open(void *opaque)
2427 {
2428     return ((IDEState *)opaque)->tray_open;
2429 }
2430 
2431 static bool ide_cd_is_medium_locked(void *opaque)
2432 {
2433     return ((IDEState *)opaque)->tray_locked;
2434 }
2435 
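/* Resize callback for hard disk and CFATA drives: refresh nb_sectors and the
 * size-related words of the identify data after the backing image changes
 * size. */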
2436 static void ide_resize_cb(void *opaque)
2437 {
2438     IDEState *s = opaque;
2439     uint64_t nb_sectors;
2440 
2441     if (!s->identify_set) {
2442         return;
2443     }
2444 
2445     blk_get_geometry(s->blk, &nb_sectors);
2446     s->nb_sectors = nb_sectors;
2447 
2448     /* Update the identify data buffer. */
2449     if (s->drive_kind == IDE_CFATA) {
2450         ide_cfata_identify_size(s);
2451     } else {
2452         /* IDE_CD uses a different set of callbacks entirely. */
2453         assert(s->drive_kind != IDE_CD);
2454         ide_identify_size(s);
2455     }
2456 }
2457 
2458 static const BlockDevOps ide_cd_block_ops = {
2459     .change_media_cb = ide_cd_change_cb,
2460     .eject_request_cb = ide_cd_eject_request_cb,
2461     .is_tray_open = ide_cd_is_tray_open,
2462     .is_medium_locked = ide_cd_is_medium_locked,
2463 };
2464 
2465 static const BlockDevOps ide_hd_block_ops = {
2466     .resize_cb = ide_resize_cb,
2467 };
2468 
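/* Attach a block backend to an IDEState and initialise geometry, identity
 * strings and SMART state. Fails if a non-CD drive has no medium or is
 * read-only. */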
2469 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2470                    const char *version, const char *serial, const char *model,
2471                    uint64_t wwn,
2472                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2473                    int chs_trans, Error **errp)
2474 {
2475     uint64_t nb_sectors;
2476 
2477     s->blk = blk;
2478     s->drive_kind = kind;
2479 
2480     blk_get_geometry(blk, &nb_sectors);
2481     s->cylinders = cylinders;
2482     s->heads = heads;
2483     s->sectors = secs;
2484     s->chs_trans = chs_trans;
2485     s->nb_sectors = nb_sectors;
2486     s->wwn = wwn;
2487     /* The SMART values should be preserved across power cycles
2488        but they aren't.  */
2489     s->smart_enabled = 1;
2490     s->smart_autosave = 1;
2491     s->smart_errors = 0;
2492     s->smart_selftest_count = 0;
2493     if (kind == IDE_CD) {
2494         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2495         blk_set_guest_block_size(blk, 2048);
2496     } else {
2497         if (!blk_is_inserted(s->blk)) {
2498             error_setg(errp, "Device needs media, but drive is empty");
2499             return -1;
2500         }
2501         if (blk_is_read_only(blk)) {
2502             error_setg(errp, "Can't use a read-only drive");
2503             return -1;
2504         }
2505         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2506     }
2507     if (serial) {
2508         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2509     } else {
2510         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2511                  "QM%05d", s->drive_serial);
2512     }
2513     if (model) {
2514         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2515     } else {
2516         switch (kind) {
2517         case IDE_CD:
2518             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2519             break;
2520         case IDE_CFATA:
2521             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2522             break;
2523         default:
2524             strcpy(s->drive_model_str, "QEMU HARDDISK");
2525             break;
2526         }
2527     }
2528 
2529     if (version) {
2530         pstrcpy(s->version, sizeof(s->version), version);
2531     } else {
2532         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2533     }
2534 
2535     ide_reset(s);
2536     blk_iostatus_enable(blk);
2537     return 0;
2538 }
2539 
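/* Per-interface init: allocate the (2k-aligned) I/O buffer, the SMART
 * self-test log buffer and the sector write timer. */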
2540 static void ide_init1(IDEBus *bus, int unit)
2541 {
2542     static int drive_serial = 1;
2543     IDEState *s = &bus->ifs[unit];
2544 
2545     s->bus = bus;
2546     s->unit = unit;
2547     s->drive_serial = drive_serial++;
2548     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2549     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2550     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2551     memset(s->io_buffer, 0, s->io_buffer_total_len);
2552 
2553     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2554     memset(s->smart_selftest_data, 0, 512);
2555 
2556     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2557                                            ide_sector_write_timer_cb, s);
2558 }
2559 
2560 static int ide_nop_int(IDEDMA *dma, int x)
2561 {
2562     return 0;
2563 }
2564 
2565 static void ide_nop(IDEDMA *dma)
2566 {
2567 }
2568 
2569 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2570 {
2571     return 0;
2572 }
2573 
2574 static const IDEDMAOps ide_dma_nop_ops = {
2575     .prepare_buf    = ide_nop_int32,
2576     .restart_dma    = ide_nop,
2577     .rw_buf         = ide_nop_int,
2578 };
2579 
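/* Re-issue an interrupted DMA request from the retry state recorded on the
 * bus (unit, sector number and sector count). */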
2580 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2581 {
2582     s->unit = s->bus->retry_unit;
2583     ide_set_sector(s, s->bus->retry_sector_num);
2584     s->nsector = s->bus->retry_nsector;
2585     s->bus->dma->ops->restart_dma(s->bus->dma);
2586     s->io_buffer_size = 0;
2587     s->dma_cmd = dma_cmd;
2588     ide_start_dma(s, ide_dma_cb);
2589 }
2590 
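/* Bottom half that resumes whatever request was pending when the VM stopped,
 * based on the error status recorded on the bus. */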
2591 static void ide_restart_bh(void *opaque)
2592 {
2593     IDEBus *bus = opaque;
2594     IDEState *s;
2595     bool is_read;
2596     int error_status;
2597 
2598     qemu_bh_delete(bus->bh);
2599     bus->bh = NULL;
2600 
2601     error_status = bus->error_status;
2602     if (bus->error_status == 0) {
2603         return;
2604     }
2605 
2606     s = idebus_active_if(bus);
2607     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2608 
2609     /* The error status must be cleared before resubmitting the request: The
2610      * request may fail again, and this case can only be distinguished if the
2611      * called function can set a new error status. */
2612     bus->error_status = 0;
2613 
2614     /* The HBA has generically asked to be kicked on retry */
2615     if (error_status & IDE_RETRY_HBA) {
2616         if (s->bus->dma->ops->restart) {
2617             s->bus->dma->ops->restart(s->bus->dma);
2618         }
2619     } else if (IS_IDE_RETRY_DMA(error_status)) {
2620         if (error_status & IDE_RETRY_TRIM) {
2621             ide_restart_dma(s, IDE_DMA_TRIM);
2622         } else {
2623             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2624         }
2625     } else if (IS_IDE_RETRY_PIO(error_status)) {
2626         if (is_read) {
2627             ide_sector_read(s);
2628         } else {
2629             ide_sector_write(s);
2630         }
2631     } else if (error_status & IDE_RETRY_FLUSH) {
2632         ide_flush_cache(s);
2633     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2634         assert(s->end_transfer_func == ide_atapi_cmd);
2635         ide_atapi_dma_restart(s);
2636     } else {
2637         abort();
2638     }
2639 }
2640 
2641 static void ide_restart_cb(void *opaque, int running, RunState state)
2642 {
2643     IDEBus *bus = opaque;
2644 
2645     if (!running)
2646         return;
2647 
2648     if (!bus->bh) {
2649         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2650         qemu_bh_schedule(bus->bh);
2651     }
2652 }
2653 
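/* Register a VM change state handler so that pending requests are retried
 * when the VM resumes; only DMA providers that implement restart_dma need
 * this. */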
2654 void ide_register_restart_cb(IDEBus *bus)
2655 {
2656     if (bus->dma->ops->restart_dma) {
2657         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2658     }
2659 }
2660 
2661 static IDEDMA ide_dma_nop = {
2662     .ops = &ide_dma_nop_ops,
2663     .aiocb = NULL,
2664 };
2665 
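/* Initialise both drives on the bus and attach the interrupt line. The DMA
 * provider defaults to the nop implementation until a controller installs
 * its own. */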
2666 void ide_init2(IDEBus *bus, qemu_irq irq)
2667 {
2668     int i;
2669 
2670     for (i = 0; i < 2; i++) {
2671         ide_init1(bus, i);
2672         ide_reset(&bus->ifs[i]);
2673     }
2674     bus->irq = irq;
2675     bus->dma = &ide_dma_nop;
2676 }
2677 
2678 void ide_exit(IDEState *s)
2679 {
2680     timer_del(s->sector_write_timer);
2681     timer_free(s->sector_write_timer);
2682     qemu_vfree(s->smart_selftest_data);
2683     qemu_vfree(s->io_buffer);
2684 }
2685 
2686 static bool is_identify_set(void *opaque, int version_id)
2687 {
2688     IDEState *s = opaque;
2689 
2690     return s->identify_set != 0;
2691 }
2692 
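/* Known end_transfer_func callbacks; migration stores an index into this
 * table instead of a function pointer. */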
2693 static EndTransferFunc *transfer_end_table[] = {
2694         ide_sector_read,
2695         ide_sector_write,
2696         ide_transfer_stop,
2697         ide_atapi_cmd_reply_end,
2698         ide_atapi_cmd,
2699         ide_dummy_transfer_stop,
2700 };
2701 
2702 static int transfer_end_table_idx(EndTransferFunc *fn)
2703 {
2704     int i;
2705 
2706     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2707         if (transfer_end_table[i] == fn)
2708             return i;
2709 
2710     return -1;
2711 }
2712 
2713 static int ide_drive_post_load(void *opaque, int version_id)
2714 {
2715     IDEState *s = opaque;
2716 
2717     if (s->blk && s->identify_set) {
2718         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2719     }
2720     return 0;
2721 }
2722 
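/* Restore the PIO transfer pointers and end_transfer_func from the saved
 * buffer offsets and table index; an out-of-range index is rejected. */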
2723 static int ide_drive_pio_post_load(void *opaque, int version_id)
2724 {
2725     IDEState *s = opaque;
2726 
2727     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2728         return -EINVAL;
2729     }
2730     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2731     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2732     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2733     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2734 
2735     return 0;
2736 }
2737 
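/* Convert the PIO transfer pointers into buffer offsets and end_transfer_func
 * into a table index before the state is saved. */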
2738 static int ide_drive_pio_pre_save(void *opaque)
2739 {
2740     IDEState *s = opaque;
2741     int idx;
2742 
2743     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2744     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2745 
2746     idx = transfer_end_table_idx(s->end_transfer_func);
2747     if (idx == -1) {
2748         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2749                         __func__);
2750         s->end_transfer_fn_idx = 2;
2751     } else {
2752         s->end_transfer_fn_idx = idx;
2753     }
2754 
2755     return 0;
2756 }
2757 
2758 static bool ide_drive_pio_state_needed(void *opaque)
2759 {
2760     IDEState *s = opaque;
2761 
2762     return ((s->status & DRQ_STAT) != 0)
2763         || (s->bus->error_status & IDE_RETRY_PIO);
2764 }
2765 
2766 static bool ide_tray_state_needed(void *opaque)
2767 {
2768     IDEState *s = opaque;
2769 
2770     return s->tray_open || s->tray_locked;
2771 }
2772 
2773 static bool ide_atapi_gesn_needed(void *opaque)
2774 {
2775     IDEState *s = opaque;
2776 
2777     return s->events.new_media || s->events.eject_request;
2778 }
2779 
2780 static bool ide_error_needed(void *opaque)
2781 {
2782     IDEBus *bus = opaque;
2783 
2784     return (bus->error_status != 0);
2785 }
2786 
2787 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2788 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2789     .name = "ide_drive/atapi/gesn_state",
2790     .version_id = 1,
2791     .minimum_version_id = 1,
2792     .needed = ide_atapi_gesn_needed,
2793     .fields = (VMStateField[]) {
2794         VMSTATE_BOOL(events.new_media, IDEState),
2795         VMSTATE_BOOL(events.eject_request, IDEState),
2796         VMSTATE_END_OF_LIST()
2797     }
2798 };
2799 
2800 static const VMStateDescription vmstate_ide_tray_state = {
2801     .name = "ide_drive/tray_state",
2802     .version_id = 1,
2803     .minimum_version_id = 1,
2804     .needed = ide_tray_state_needed,
2805     .fields = (VMStateField[]) {
2806         VMSTATE_BOOL(tray_open, IDEState),
2807         VMSTATE_BOOL(tray_locked, IDEState),
2808         VMSTATE_END_OF_LIST()
2809     }
2810 };
2811 
2812 static const VMStateDescription vmstate_ide_drive_pio_state = {
2813     .name = "ide_drive/pio_state",
2814     .version_id = 1,
2815     .minimum_version_id = 1,
2816     .pre_save = ide_drive_pio_pre_save,
2817     .post_load = ide_drive_pio_post_load,
2818     .needed = ide_drive_pio_state_needed,
2819     .fields = (VMStateField[]) {
2820         VMSTATE_INT32(req_nb_sectors, IDEState),
2821         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2822                              vmstate_info_uint8, uint8_t),
2823         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2824         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2825         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2826         VMSTATE_INT32(elementary_transfer_size, IDEState),
2827         VMSTATE_INT32(packet_transfer_size, IDEState),
2828         VMSTATE_END_OF_LIST()
2829     }
2830 };
2831 
2832 const VMStateDescription vmstate_ide_drive = {
2833     .name = "ide_drive",
2834     .version_id = 3,
2835     .minimum_version_id = 0,
2836     .post_load = ide_drive_post_load,
2837     .fields = (VMStateField[]) {
2838         VMSTATE_INT32(mult_sectors, IDEState),
2839         VMSTATE_INT32(identify_set, IDEState),
2840         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2841         VMSTATE_UINT8(feature, IDEState),
2842         VMSTATE_UINT8(error, IDEState),
2843         VMSTATE_UINT32(nsector, IDEState),
2844         VMSTATE_UINT8(sector, IDEState),
2845         VMSTATE_UINT8(lcyl, IDEState),
2846         VMSTATE_UINT8(hcyl, IDEState),
2847         VMSTATE_UINT8(hob_feature, IDEState),
2848         VMSTATE_UINT8(hob_sector, IDEState),
2849         VMSTATE_UINT8(hob_nsector, IDEState),
2850         VMSTATE_UINT8(hob_lcyl, IDEState),
2851         VMSTATE_UINT8(hob_hcyl, IDEState),
2852         VMSTATE_UINT8(select, IDEState),
2853         VMSTATE_UINT8(status, IDEState),
2854         VMSTATE_UINT8(lba48, IDEState),
2855         VMSTATE_UINT8(sense_key, IDEState),
2856         VMSTATE_UINT8(asc, IDEState),
2857         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2858         VMSTATE_END_OF_LIST()
2859     },
2860     .subsections = (const VMStateDescription*[]) {
2861         &vmstate_ide_drive_pio_state,
2862         &vmstate_ide_tray_state,
2863         &vmstate_ide_atapi_gesn_state,
2864         NULL
2865     }
2866 };
2867 
2868 static const VMStateDescription vmstate_ide_error_status = {
2869     .name = "ide_bus/error",
2870     .version_id = 2,
2871     .minimum_version_id = 1,
2872     .needed = ide_error_needed,
2873     .fields = (VMStateField[]) {
2874         VMSTATE_INT32(error_status, IDEBus),
2875         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2876         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2877         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2878         VMSTATE_END_OF_LIST()
2879     }
2880 };
2881 
2882 const VMStateDescription vmstate_ide_bus = {
2883     .name = "ide_bus",
2884     .version_id = 1,
2885     .minimum_version_id = 1,
2886     .fields = (VMStateField[]) {
2887         VMSTATE_UINT8(cmd, IDEBus),
2888         VMSTATE_UINT8(unit, IDEBus),
2889         VMSTATE_END_OF_LIST()
2890     },
2891     .subsections = (const VMStateDescription*[]) {
2892         &vmstate_ide_error_status,
2893         NULL
2894     }
2895 };
2896 
2897 void ide_drive_get(DriveInfo **hd, int n)
2898 {
2899     int i;
2900 
2901     for (i = 0; i < n; i++) {
2902         hd[i] = drive_get_by_index(IF_IDE, i);
2903     }
2904 }
2905