xref: /openbmc/qemu/hw/ide/core.c (revision c39f95dc)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
38 
39 #include "hw/ide/internal.h"
40 #include "trace.h"
41 
42 /* These values were based on a Seagate ST3500418AS but have been modified
43    to make more sense in QEMU */
44 static const int smart_attributes[][12] = {
45     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
46     /* raw read error rate*/
47     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
48     /* spin up */
49     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
50     /* start stop count */
51     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
52     /* remapped sectors */
53     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
54     /* power on hours */
55     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* power cycle count */
57     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58     /* airflow-temperature-celsius */
59     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
60 };
61 
62 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
63     [IDE_DMA_READ] = "DMA READ",
64     [IDE_DMA_WRITE] = "DMA WRITE",
65     [IDE_DMA_TRIM] = "DMA TRIM",
66     [IDE_DMA_ATAPI] = "DMA ATAPI"
67 };
68 
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
70 {
71     if ((unsigned)enval < IDE_DMA__COUNT) {
72         return IDE_DMA_CMD_lookup[enval];
73     }
74     return "DMA UNKNOWN CMD";
75 }
76 
77 static void ide_dummy_transfer_stop(IDEState *s);
78 
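/*
 * Fill an ATA IDENTIFY string field: ASCII, space padded, with the two
 * characters of each 16-bit word swapped (the first character of a pair
 * is stored in the high byte).  The "i ^ 1" index below performs that
 * per-word byte swap as the string is copied.
 */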
79 static void padstr(char *str, const char *src, int len)
80 {
81     int i, v;
82     for (i = 0; i < len; i++) {
83         if (*src)
84             v = *src++;
85         else
86             v = ' ';
87         str[i^1] = v;
88     }
89 }
90 
91 static void put_le16(uint16_t *p, unsigned int v)
92 {
93     *p = cpu_to_le16(v);
94 }
95 
96 static void ide_identify_size(IDEState *s)
97 {
98     uint16_t *p = (uint16_t *)s->identify_data;
99     put_le16(p + 60, s->nb_sectors);
100     put_le16(p + 61, s->nb_sectors >> 16);
101     put_le16(p + 100, s->nb_sectors);
102     put_le16(p + 101, s->nb_sectors >> 16);
103     put_le16(p + 102, s->nb_sectors >> 32);
104     put_le16(p + 103, s->nb_sectors >> 48);
105 }
106 
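/*
 * Build the 512-byte IDENTIFY DEVICE response for an ATA disk.  Each
 * put_le16(p + N, ...) call below fills ATA identify word N.  The data is
 * cached in s->identify_data: once identify_set is true, the function
 * skips straight to the final memcpy, and only the capacity words are
 * refreshed separately via ide_identify_size().
 */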
107 static void ide_identify(IDEState *s)
108 {
109     uint16_t *p;
110     unsigned int oldsize;
111     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
112 
113     p = (uint16_t *)s->identify_data;
114     if (s->identify_set) {
115         goto fill_buffer;
116     }
117     memset(p, 0, sizeof(s->identify_data));
118 
119     put_le16(p + 0, 0x0040);
120     put_le16(p + 1, s->cylinders);
121     put_le16(p + 3, s->heads);
122     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
123     put_le16(p + 5, 512); /* XXX: retired, remove ? */
124     put_le16(p + 6, s->sectors);
125     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
126     put_le16(p + 20, 3); /* XXX: retired, remove ? */
127     put_le16(p + 21, 512); /* cache size in sectors */
128     put_le16(p + 22, 4); /* ecc bytes */
129     padstr((char *)(p + 23), s->version, 8); /* firmware version */
130     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
131 #if MAX_MULT_SECTORS > 1
132     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
133 #endif
134     put_le16(p + 48, 1); /* dword I/O */
135     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
136     put_le16(p + 51, 0x200); /* PIO transfer cycle */
137     put_le16(p + 52, 0x200); /* DMA transfer cycle */
138     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
139     put_le16(p + 54, s->cylinders);
140     put_le16(p + 55, s->heads);
141     put_le16(p + 56, s->sectors);
142     oldsize = s->cylinders * s->heads * s->sectors;
143     put_le16(p + 57, oldsize);
144     put_le16(p + 58, oldsize >> 16);
145     if (s->mult_sectors)
146         put_le16(p + 59, 0x100 | s->mult_sectors);
147     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
148     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
149     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
150     put_le16(p + 63, 0x07); /* mdma0-2 supported */
151     put_le16(p + 64, 0x03); /* pio3-4 supported */
152     put_le16(p + 65, 120);
153     put_le16(p + 66, 120);
154     put_le16(p + 67, 120);
155     put_le16(p + 68, 120);
156     if (dev && dev->conf.discard_granularity) {
157         put_le16(p + 69, (1 << 14)); /* deterministic TRIM behavior */
158     }
159 
160     if (s->ncq_queues) {
161         put_le16(p + 75, s->ncq_queues - 1);
162         /* NCQ supported */
163         put_le16(p + 76, (1 << 8));
164     }
165 
166     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
167     put_le16(p + 81, 0x16); /* conforms to ata5 */
168     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
169     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
170     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
171     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
172     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
173     if (s->wwn) {
174         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
175     } else {
176         put_le16(p + 84, (1 << 14) | 0);
177     }
178     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
179     if (blk_enable_write_cache(s->blk)) {
180         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
181     } else {
182         put_le16(p + 85, (1 << 14) | 1);
183     }
184     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
185     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
186     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
187     if (s->wwn) {
188         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
189     } else {
190         put_le16(p + 87, (1 << 14) | 0);
191     }
192     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
193     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
194     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
195     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
196     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
197     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
198 
199     if (dev && dev->conf.physical_block_size)
200         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
201     if (s->wwn) {
202         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
203         put_le16(p + 108, s->wwn >> 48);
204         put_le16(p + 109, s->wwn >> 32);
205         put_le16(p + 110, s->wwn >> 16);
206         put_le16(p + 111, s->wwn);
207     }
208     if (dev && dev->conf.discard_granularity) {
209         put_le16(p + 169, 1); /* TRIM support */
210     }
211     put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
212 
213     ide_identify_size(s);
214     s->identify_set = 1;
215 
216 fill_buffer:
217     memcpy(s->io_buffer, p, sizeof(s->identify_data));
218 }
219 
220 static void ide_atapi_identify(IDEState *s)
221 {
222     uint16_t *p;
223 
224     p = (uint16_t *)s->identify_data;
225     if (s->identify_set) {
226         goto fill_buffer;
227     }
228     memset(p, 0, sizeof(s->identify_data));
229 
230     /* Removable CDROM, 50us response, 12 byte packets */
231     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
232     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
233     put_le16(p + 20, 3); /* buffer type */
234     put_le16(p + 21, 512); /* cache size in sectors */
235     put_le16(p + 22, 4); /* ecc bytes */
236     padstr((char *)(p + 23), s->version, 8); /* firmware version */
237     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
238     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
239 #ifdef USE_DMA_CDROM
240     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
241     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
242     put_le16(p + 62, 7);  /* single word dma0-2 supported */
243     put_le16(p + 63, 7);  /* mdma0-2 supported */
244 #else
245     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
246     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
247     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
248 #endif
249     put_le16(p + 64, 3); /* pio3-4 supported */
250     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
251     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
252     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
253     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
254 
255     put_le16(p + 71, 30); /* in ns */
256     put_le16(p + 72, 30); /* in ns */
257 
258     if (s->ncq_queues) {
259         put_le16(p + 75, s->ncq_queues - 1);
260         /* NCQ supported */
261         put_le16(p + 76, (1 << 8));
262     }
263 
264     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
265     if (s->wwn) {
266         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
267         put_le16(p + 87, (1 << 8)); /* WWN enabled */
268     }
269 
270 #ifdef USE_DMA_CDROM
271     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
272 #endif
273 
274     if (s->wwn) {
275         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
276         put_le16(p + 108, s->wwn >> 48);
277         put_le16(p + 109, s->wwn >> 32);
278         put_le16(p + 110, s->wwn >> 16);
279         put_le16(p + 111, s->wwn);
280     }
281 
282     s->identify_set = 1;
283 
284 fill_buffer:
285     memcpy(s->io_buffer, p, sizeof(s->identify_data));
286 }
287 
288 static void ide_cfata_identify_size(IDEState *s)
289 {
290     uint16_t *p = (uint16_t *)s->identify_data;
291     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
292     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
293     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
294     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
295 }
296 
297 static void ide_cfata_identify(IDEState *s)
298 {
299     uint16_t *p;
300     uint32_t cur_sec;
301 
302     p = (uint16_t *)s->identify_data;
303     if (s->identify_set) {
304         goto fill_buffer;
305     }
306     memset(p, 0, sizeof(s->identify_data));
307 
308     cur_sec = s->cylinders * s->heads * s->sectors;
309 
310     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
311     put_le16(p + 1, s->cylinders);		/* Default cylinders */
312     put_le16(p + 3, s->heads);			/* Default heads */
313     put_le16(p + 6, s->sectors);		/* Default sectors per track */
314     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
315     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
316     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
317     put_le16(p + 22, 0x0004);			/* ECC bytes */
318     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
319     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
320 #if MAX_MULT_SECTORS > 1
321     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
322 #else
323     put_le16(p + 47, 0x0000);
324 #endif
325     put_le16(p + 49, 0x0f00);			/* Capabilities */
326     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
327     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
328     put_le16(p + 53, 0x0003);			/* Translation params valid */
329     put_le16(p + 54, s->cylinders);		/* Current cylinders */
330     put_le16(p + 55, s->heads);			/* Current heads */
331     put_le16(p + 56, s->sectors);		/* Current sectors */
332     put_le16(p + 57, cur_sec);			/* Current capacity */
333     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
334     if (s->mult_sectors)			/* Multiple sector setting */
335         put_le16(p + 59, 0x100 | s->mult_sectors);
336     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
337     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
338     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
339     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
340     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
341     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
342     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
343     put_le16(p + 82, 0x400c);			/* Command Set supported */
344     put_le16(p + 83, 0x7068);			/* Command Set supported */
345     put_le16(p + 84, 0x4000);			/* Features supported */
346     put_le16(p + 85, 0x000c);			/* Command Set enabled */
347     put_le16(p + 86, 0x7044);			/* Command Set enabled */
348     put_le16(p + 87, 0x4000);			/* Features enabled */
349     put_le16(p + 91, 0x4060);			/* Current APM level */
350     put_le16(p + 129, 0x0002);			/* Current features option */
351     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
352     put_le16(p + 131, 0x0001);			/* Initial power mode */
353     put_le16(p + 132, 0x0000);			/* User signature */
354     put_le16(p + 160, 0x8100);			/* Power requirement */
355     put_le16(p + 161, 0x8001);			/* CF command set */
356 
357     ide_cfata_identify_size(s);
358     s->identify_set = 1;
359 
360 fill_buffer:
361     memcpy(s->io_buffer, p, sizeof(s->identify_data));
362 }
363 
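/*
 * Write the post-reset device signature into the task file: sector count
 * and sector number are 1, and the cylinder registers encode the device
 * class (0x14/0xEB for an ATAPI/PACKET device, 0x00/0x00 for an ATA disk,
 * 0xFF/0xFF when no block backend is attached).
 */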
364 static void ide_set_signature(IDEState *s)
365 {
366     s->select &= 0xf0; /* clear head */
367     /* put signature */
368     s->nsector = 1;
369     s->sector = 1;
370     if (s->drive_kind == IDE_CD) {
371         s->lcyl = 0x14;
372         s->hcyl = 0xeb;
373     } else if (s->blk) {
374         s->lcyl = 0;
375         s->hcyl = 0;
376     } else {
377         s->lcyl = 0xff;
378         s->hcyl = 0xff;
379     }
380 }
381 
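/*
 * DSM TRIM support.  The guest supplies a DMA buffer of 8-byte range
 * entries; in each little-endian entry the low 48 bits hold the starting
 * LBA and the high 16 bits the sector count.  ide_issue_trim() walks the
 * scatter/gather vector and issues one blk_aio_pdiscard() per non-empty
 * range, chaining to the next range from ide_issue_trim_cb(); the final
 * status is delivered to the caller from a bottom half (ide_trim_bh_cb).
 */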
382 typedef struct TrimAIOCB {
383     BlockAIOCB common;
384     BlockBackend *blk;
385     QEMUBH *bh;
386     int ret;
387     QEMUIOVector *qiov;
388     BlockAIOCB *aiocb;
389     int i, j;
390 } TrimAIOCB;
391 
392 static void trim_aio_cancel(BlockAIOCB *acb)
393 {
394     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
395 
396     /* Exit the loop so ide_issue_trim_cb will not continue  */
397     iocb->j = iocb->qiov->niov - 1;
398     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
399 
400     iocb->ret = -ECANCELED;
401 
402     if (iocb->aiocb) {
403         blk_aio_cancel_async(iocb->aiocb);
404         iocb->aiocb = NULL;
405     }
406 }
407 
408 static const AIOCBInfo trim_aiocb_info = {
409     .aiocb_size         = sizeof(TrimAIOCB),
410     .cancel_async       = trim_aio_cancel,
411 };
412 
413 static void ide_trim_bh_cb(void *opaque)
414 {
415     TrimAIOCB *iocb = opaque;
416 
417     iocb->common.cb(iocb->common.opaque, iocb->ret);
418 
419     qemu_bh_delete(iocb->bh);
420     iocb->bh = NULL;
421     qemu_aio_unref(iocb);
422 }
423 
424 static void ide_issue_trim_cb(void *opaque, int ret)
425 {
426     TrimAIOCB *iocb = opaque;
427     if (ret >= 0) {
428         while (iocb->j < iocb->qiov->niov) {
429             int j = iocb->j;
430             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
431                 int i = iocb->i;
432                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
433 
434                 /* 6-byte LBA + 2-byte range per entry */
435                 uint64_t entry = le64_to_cpu(buffer[i]);
436                 uint64_t sector = entry & 0x0000ffffffffffffULL;
437                 uint16_t count = entry >> 48;
438 
439                 if (count == 0) {
440                     continue;
441                 }
442 
443                 /* Got an entry! Submit and exit.  */
444                 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
445                                                sector << BDRV_SECTOR_BITS,
446                                                count << BDRV_SECTOR_BITS,
447                                                ide_issue_trim_cb, opaque);
448                 return;
449             }
450 
451             iocb->j++;
452             iocb->i = -1;
453         }
454     } else {
455         iocb->ret = ret;
456     }
457 
458     iocb->aiocb = NULL;
459     if (iocb->bh) {
460         qemu_bh_schedule(iocb->bh);
461     }
462 }
463 
464 BlockAIOCB *ide_issue_trim(
465         int64_t offset, QEMUIOVector *qiov,
466         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
467 {
468     BlockBackend *blk = opaque;
469     TrimAIOCB *iocb;
470 
471     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
472     iocb->blk = blk;
473     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
474     iocb->ret = 0;
475     iocb->qiov = qiov;
476     iocb->i = -1;
477     iocb->j = 0;
478     ide_issue_trim_cb(iocb, 0);
479     return &iocb->common;
480 }
481 
482 void ide_abort_command(IDEState *s)
483 {
484     ide_transfer_stop(s);
485     s->status = READY_STAT | ERR_STAT;
486     s->error = ABRT_ERR;
487 }
488 
489 static void ide_set_retry(IDEState *s)
490 {
491     s->bus->retry_unit = s->unit;
492     s->bus->retry_sector_num = ide_get_sector(s);
493     s->bus->retry_nsector = s->nsector;
494 }
495 
496 static void ide_clear_retry(IDEState *s)
497 {
498     s->bus->retry_unit = -1;
499     s->bus->retry_sector_num = 0;
500     s->bus->retry_nsector = 0;
501 }
502 
503 /* prepare data transfer and tell what to do after */
504 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
505                         EndTransferFunc *end_transfer_func)
506 {
507     s->end_transfer_func = end_transfer_func;
508     s->data_ptr = buf;
509     s->data_end = buf + size;
510     ide_set_retry(s);
511     if (!(s->status & ERR_STAT)) {
512         s->status |= DRQ_STAT;
513     }
514     if (s->bus->dma->ops->start_transfer) {
515         s->bus->dma->ops->start_transfer(s->bus->dma);
516     }
517 }
518 
519 static void ide_cmd_done(IDEState *s)
520 {
521     if (s->bus->dma->ops->cmd_done) {
522         s->bus->dma->ops->cmd_done(s->bus->dma);
523     }
524 }
525 
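/*
 * Wind down a PIO transfer: reset the data pointers, clear DRQ and
 * install a terminal end_transfer_func.  ide_transfer_stop() additionally
 * notifies the DMA layer through ide_cmd_done(); ide_transfer_cancel()
 * does not, since it is used when a command is being torn down rather
 * than completed.
 */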
526 static void ide_transfer_halt(IDEState *s,
527                               void(*end_transfer_func)(IDEState *),
528                               bool notify)
529 {
530     s->end_transfer_func = end_transfer_func;
531     s->data_ptr = s->io_buffer;
532     s->data_end = s->io_buffer;
533     s->status &= ~DRQ_STAT;
534     if (notify) {
535         ide_cmd_done(s);
536     }
537 }
538 
539 void ide_transfer_stop(IDEState *s)
540 {
541     ide_transfer_halt(s, ide_transfer_stop, true);
542 }
543 
544 static void ide_transfer_cancel(IDEState *s)
545 {
546     ide_transfer_halt(s, ide_transfer_cancel, false);
547 }
548 
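/*
 * Translate the current task-file registers into an absolute sector
 * number according to the active addressing mode: 48-bit LBA (combining
 * the HOB copies of the registers), 28-bit LBA, or CHS using the
 * configured heads/sectors geometry.  ide_set_sector() below performs
 * the inverse conversion.
 */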
549 int64_t ide_get_sector(IDEState *s)
550 {
551     int64_t sector_num;
552     if (s->select & 0x40) {
553         /* lba */
554         if (!s->lba48) {
555             sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
556                          (s->lcyl << 8) | s->sector;
557         } else {
558             sector_num = ((int64_t)s->hob_hcyl << 40) |
559                          ((int64_t)s->hob_lcyl << 32) |
560                          ((int64_t)s->hob_sector << 24) |
561                          ((int64_t)s->hcyl << 16) |
562                          ((int64_t)s->lcyl << 8) | s->sector;
563         }
564     } else {
565         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
566             (s->select & 0x0f) * s->sectors + (s->sector - 1);
567     }
568     return sector_num;
569 }
570 
571 void ide_set_sector(IDEState *s, int64_t sector_num)
572 {
573     unsigned int cyl, r;
574     if (s->select & 0x40) {
575         if (!s->lba48) {
576             s->select = (s->select & 0xf0) | (sector_num >> 24);
577             s->hcyl = (sector_num >> 16);
578             s->lcyl = (sector_num >> 8);
579             s->sector = (sector_num);
580         } else {
581             s->sector = sector_num;
582             s->lcyl = sector_num >> 8;
583             s->hcyl = sector_num >> 16;
584             s->hob_sector = sector_num >> 24;
585             s->hob_lcyl = sector_num >> 32;
586             s->hob_hcyl = sector_num >> 40;
587         }
588     } else {
589         cyl = sector_num / (s->heads * s->sectors);
590         r = sector_num % (s->heads * s->sectors);
591         s->hcyl = cyl >> 8;
592         s->lcyl = cyl;
593         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
594         s->sector = (r % s->sectors) + 1;
595     }
596 }
597 
598 static void ide_rw_error(IDEState *s) {
599     ide_abort_command(s);
600     ide_set_irq(s->bus);
601 }
602 
603 static bool ide_sect_range_ok(IDEState *s,
604                               uint64_t sector, uint64_t nb_sectors)
605 {
606     uint64_t total_sectors;
607 
608     blk_get_geometry(s->blk, &total_sectors);
609     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
610         return false;
611     }
612     return true;
613 }
614 
615 static void ide_buffered_readv_cb(void *opaque, int ret)
616 {
617     IDEBufferedRequest *req = opaque;
618     if (!req->orphaned) {
619         if (!ret) {
620             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
621                                 req->original_qiov->size);
622         }
623         req->original_cb(req->original_opaque, ret);
624     }
625     QLIST_REMOVE(req, list);
626     qemu_vfree(req->iov.iov_base);
627     g_free(req);
628 }
629 
630 #define MAX_BUFFERED_REQS 16
631 
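/*
 * PIO reads are bounced through a private buffer so that a request can be
 * orphaned (see ide_cancel_dma_sync): the original callback is completed
 * with -ECANCELED right away, and when the backend read later finishes,
 * ide_buffered_readv_cb() frees the bounce buffer without copying into
 * the original iovec.
 */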
632 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
633                                QEMUIOVector *iov, int nb_sectors,
634                                BlockCompletionFunc *cb, void *opaque)
635 {
636     BlockAIOCB *aioreq;
637     IDEBufferedRequest *req;
638     int c = 0;
639 
640     QLIST_FOREACH(req, &s->buffered_requests, list) {
641         c++;
642     }
643     if (c > MAX_BUFFERED_REQS) {
644         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
645     }
646 
647     req = g_new0(IDEBufferedRequest, 1);
648     req->original_qiov = iov;
649     req->original_cb = cb;
650     req->original_opaque = opaque;
651     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
652     req->iov.iov_len = iov->size;
653     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
654 
655     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
656                             &req->qiov, 0, ide_buffered_readv_cb, req);
657 
658     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
659     return aioreq;
660 }
661 
662 /**
663  * Cancel all pending DMA requests.
664  * Any buffered DMA requests are instantly canceled,
665  * but any pending unbuffered DMA requests must be waited on.
666  */
667 void ide_cancel_dma_sync(IDEState *s)
668 {
669     IDEBufferedRequest *req;
670 
671     /* First invoke the callbacks of all buffered requests
672      * and flag those requests as orphaned. Ideally there
673      * are no unbuffered requests (scatter-gather DMA requests or
674      * write requests) pending, so we can avoid draining. */
675     QLIST_FOREACH(req, &s->buffered_requests, list) {
676         if (!req->orphaned) {
677             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
678             req->original_cb(req->original_opaque, -ECANCELED);
679         }
680         req->orphaned = true;
681     }
682 
683     /*
684      * We can't cancel scatter-gather DMA in the middle of the
685      * operation, or a partial (not full) DMA transfer would reach
686      * the storage, so we wait for completion instead (we behave as
687      * if the DMA had already completed by the time the guest tried
688      * to cancel it via bmdma_cmd_writeb with BM_CMD_START not
689      * set).
690      *
691      * In the future we'll be able to safely cancel the I/O if the
692      * whole DMA operation is submitted to disk with a single aio
693      * operation using preadv/pwritev.
694      */
695     if (s->bus->dma->aiocb) {
696         trace_ide_cancel_dma_sync_remaining();
697         blk_drain(s->blk);
698         assert(s->bus->dma->aiocb == NULL);
699     }
700 }
701 
702 static void ide_sector_read(IDEState *s);
703 
704 static void ide_sector_read_cb(void *opaque, int ret)
705 {
706     IDEState *s = opaque;
707     int n;
708 
709     s->pio_aiocb = NULL;
710     s->status &= ~BUSY_STAT;
711 
712     if (ret == -ECANCELED) {
713         return;
714     }
715     if (ret != 0) {
716         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
717                                 IDE_RETRY_READ)) {
718             return;
719         }
720     }
721 
722     block_acct_done(blk_get_stats(s->blk), &s->acct);
723 
724     n = s->nsector;
725     if (n > s->req_nb_sectors) {
726         n = s->req_nb_sectors;
727     }
728 
729     ide_set_sector(s, ide_get_sector(s) + n);
730     s->nsector -= n;
731     /* Allow the guest to read the io_buffer */
732     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
733     ide_set_irq(s->bus);
734 }
735 
736 static void ide_sector_read(IDEState *s)
737 {
738     int64_t sector_num;
739     int n;
740 
741     s->status = READY_STAT | SEEK_STAT;
742     s->error = 0; /* not needed by IDE spec, but needed by Windows */
743     sector_num = ide_get_sector(s);
744     n = s->nsector;
745 
746     if (n == 0) {
747         ide_transfer_stop(s);
748         return;
749     }
750 
751     s->status |= BUSY_STAT;
752 
753     if (n > s->req_nb_sectors) {
754         n = s->req_nb_sectors;
755     }
756 
757     trace_ide_sector_read(sector_num, n);
758 
759     if (!ide_sect_range_ok(s, sector_num, n)) {
760         ide_rw_error(s);
761         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
762         return;
763     }
764 
765     s->iov.iov_base = s->io_buffer;
766     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
767     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
768 
769     block_acct_start(blk_get_stats(s->blk), &s->acct,
770                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
771     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
772                                       ide_sector_read_cb, s);
773 }
774 
775 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
776 {
777     if (s->bus->dma->ops->commit_buf) {
778         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
779     }
780     s->io_buffer_offset += tx_bytes;
781     qemu_sglist_destroy(&s->sg);
782 }
783 
784 void ide_set_inactive(IDEState *s, bool more)
785 {
786     s->bus->dma->aiocb = NULL;
787     ide_clear_retry(s);
788     if (s->bus->dma->ops->set_inactive) {
789         s->bus->dma->ops->set_inactive(s->bus->dma, more);
790     }
791     ide_cmd_done(s);
792 }
793 
794 void ide_dma_error(IDEState *s)
795 {
796     dma_buf_commit(s, 0);
797     ide_abort_command(s);
798     ide_set_inactive(s, false);
799     ide_set_irq(s->bus);
800 }
801 
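/*
 * Apply the configured rerror/werror policy to a failed request.  Returns
 * nonzero when the error was handled (the VM was stopped for a later
 * retry, or the failure was reported to the guest); returns 0 only for
 * the "ignore" action, in which case the caller continues as if the
 * request had succeeded.
 */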
802 int ide_handle_rw_error(IDEState *s, int error, int op)
803 {
804     bool is_read = (op & IDE_RETRY_READ) != 0;
805     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
806 
807     if (action == BLOCK_ERROR_ACTION_STOP) {
808         assert(s->bus->retry_unit == s->unit);
809         s->bus->error_status = op;
810     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
811         block_acct_failed(blk_get_stats(s->blk), &s->acct);
812         if (IS_IDE_RETRY_DMA(op)) {
813             ide_dma_error(s);
814         } else if (IS_IDE_RETRY_ATAPI(op)) {
815             ide_atapi_io_error(s, -error);
816         } else {
817             ide_rw_error(s);
818         }
819     }
820     blk_error_action(s->blk, action, is_read, error);
821     return action != BLOCK_ERROR_ACTION_IGNORE;
822 }
823 
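/*
 * Completion callback that drives a whole DMA command.  Each invocation
 * commits the scatter/gather list that just finished, advances the sector
 * address and remaining count, then either maps the next run of PRDs via
 * prepare_buf() and resubmits the I/O, or completes the command at the
 * "eot" label.
 */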
824 static void ide_dma_cb(void *opaque, int ret)
825 {
826     IDEState *s = opaque;
827     int n;
828     int64_t sector_num;
829     uint64_t offset;
830     bool stay_active = false;
831 
832     if (ret == -ECANCELED) {
833         return;
834     }
835     if (ret < 0) {
836         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
837             s->bus->dma->aiocb = NULL;
838             dma_buf_commit(s, 0);
839             return;
840         }
841     }
842 
843     n = s->io_buffer_size >> 9;
844     if (n > s->nsector) {
845         /* The PRDs were longer than needed for this request. Shorten them so
846          * we don't get a negative remainder. The Active bit must remain set
847          * after the request completes. */
848         n = s->nsector;
849         stay_active = true;
850     }
851 
852     sector_num = ide_get_sector(s);
853     if (n > 0) {
854         assert(n * 512 == s->sg.size);
855         dma_buf_commit(s, s->sg.size);
856         sector_num += n;
857         ide_set_sector(s, sector_num);
858         s->nsector -= n;
859     }
860 
861     /* end of transfer ? */
862     if (s->nsector == 0) {
863         s->status = READY_STAT | SEEK_STAT;
864         ide_set_irq(s->bus);
865         goto eot;
866     }
867 
868     /* launch next transfer */
869     n = s->nsector;
870     s->io_buffer_index = 0;
871     s->io_buffer_size = n * 512;
872     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
873         /* The PRDs were too short. Reset the Active bit, but don't raise an
874          * interrupt. */
875         s->status = READY_STAT | SEEK_STAT;
876         dma_buf_commit(s, 0);
877         goto eot;
878     }
879 
880     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
881 
882     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
883         !ide_sect_range_ok(s, sector_num, n)) {
884         ide_dma_error(s);
885         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
886         return;
887     }
888 
889     offset = sector_num << BDRV_SECTOR_BITS;
890     switch (s->dma_cmd) {
891     case IDE_DMA_READ:
892         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
893                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
894         break;
895     case IDE_DMA_WRITE:
896         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
897                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
898         break;
899     case IDE_DMA_TRIM:
900         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
901                                         &s->sg, offset, BDRV_SECTOR_SIZE,
902                                         ide_issue_trim, s->blk, ide_dma_cb, s,
903                                         DMA_DIRECTION_TO_DEVICE);
904         break;
905     default:
906         abort();
907     }
908     return;
909 
910 eot:
911     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
912         block_acct_done(blk_get_stats(s->blk), &s->acct);
913     }
914     ide_set_inactive(s, stay_active);
915 }
916 
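/*
 * Start a disk DMA command: record the command kind for accounting and
 * error retry, begin block accounting for reads/writes, and hand off to
 * the bus-specific start_dma hook, which will arrange for ide_dma_cb()
 * above to be invoked.
 */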
917 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
918 {
919     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
920     s->io_buffer_size = 0;
921     s->dma_cmd = dma_cmd;
922 
923     switch (dma_cmd) {
924     case IDE_DMA_READ:
925         block_acct_start(blk_get_stats(s->blk), &s->acct,
926                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
927         break;
928     case IDE_DMA_WRITE:
929         block_acct_start(blk_get_stats(s->blk), &s->acct,
930                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
931         break;
932     default:
933         break;
934     }
935 
936     ide_start_dma(s, ide_dma_cb);
937 }
938 
939 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
940 {
941     s->io_buffer_index = 0;
942     ide_set_retry(s);
943     if (s->bus->dma->ops->start_dma) {
944         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
945     }
946 }
947 
948 static void ide_sector_write(IDEState *s);
949 
950 static void ide_sector_write_timer_cb(void *opaque)
951 {
952     IDEState *s = opaque;
953     ide_set_irq(s->bus);
954 }
955 
956 static void ide_sector_write_cb(void *opaque, int ret)
957 {
958     IDEState *s = opaque;
959     int n;
960 
961     if (ret == -ECANCELED) {
962         return;
963     }
964 
965     s->pio_aiocb = NULL;
966     s->status &= ~BUSY_STAT;
967 
968     if (ret != 0) {
969         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
970             return;
971         }
972     }
973 
974     block_acct_done(blk_get_stats(s->blk), &s->acct);
975 
976     n = s->nsector;
977     if (n > s->req_nb_sectors) {
978         n = s->req_nb_sectors;
979     }
980     s->nsector -= n;
981 
982     ide_set_sector(s, ide_get_sector(s) + n);
983     if (s->nsector == 0) {
984         /* no more sectors to write */
985         ide_transfer_stop(s);
986     } else {
987         int n1 = s->nsector;
988         if (n1 > s->req_nb_sectors) {
989             n1 = s->req_nb_sectors;
990         }
991         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
992                            ide_sector_write);
993     }
994 
995     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
996         /* It seems there is a bug in the Windows 2000 installer HDD
997            IDE driver which fills the disk with empty logs when the
998            IDE write IRQ comes too early. This hack tries to correct
999            that at the expense of slower write performance. Use this
1000            option _only_ to install Windows 2000. You must disable it
1001            for normal use. */
1002         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1003                   (NANOSECONDS_PER_SECOND / 1000));
1004     } else {
1005         ide_set_irq(s->bus);
1006     }
1007 }
1008 
1009 static void ide_sector_write(IDEState *s)
1010 {
1011     int64_t sector_num;
1012     int n;
1013 
1014     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1015     sector_num = ide_get_sector(s);
1016 
1017     n = s->nsector;
1018     if (n > s->req_nb_sectors) {
1019         n = s->req_nb_sectors;
1020     }
1021 
1022     trace_ide_sector_write(sector_num, n);
1023 
1024     if (!ide_sect_range_ok(s, sector_num, n)) {
1025         ide_rw_error(s);
1026         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1027         return;
1028     }
1029 
1030     s->iov.iov_base = s->io_buffer;
1031     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1032     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1033 
1034     block_acct_start(blk_get_stats(s->blk), &s->acct,
1035                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1036     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1037                                    &s->qiov, 0, ide_sector_write_cb, s);
1038 }
1039 
1040 static void ide_flush_cb(void *opaque, int ret)
1041 {
1042     IDEState *s = opaque;
1043 
1044     s->pio_aiocb = NULL;
1045 
1046     if (ret == -ECANCELED) {
1047         return;
1048     }
1049     if (ret < 0) {
1050         /* XXX: What sector number to set here? */
1051         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1052             return;
1053         }
1054     }
1055 
1056     if (s->blk) {
1057         block_acct_done(blk_get_stats(s->blk), &s->acct);
1058     }
1059     s->status = READY_STAT | SEEK_STAT;
1060     ide_cmd_done(s);
1061     ide_set_irq(s->bus);
1062 }
1063 
1064 static void ide_flush_cache(IDEState *s)
1065 {
1066     if (s->blk == NULL) {
1067         ide_flush_cb(s, 0);
1068         return;
1069     }
1070 
1071     s->status |= BUSY_STAT;
1072     ide_set_retry(s);
1073     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1074 
1075     if (blk_bs(s->blk)) {
1076         s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1077     } else {
1078         /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1079          * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1080          */
1081         ide_flush_cb(s, 0);
1082     }
1083 }
1084 
1085 static void ide_cfata_metadata_inquiry(IDEState *s)
1086 {
1087     uint16_t *p;
1088     uint32_t spd;
1089 
1090     p = (uint16_t *) s->io_buffer;
1091     memset(p, 0, 0x200);
1092     spd = ((s->mdata_size - 1) >> 9) + 1;
1093 
1094     put_le16(p + 0, 0x0001);			/* Data format revision */
1095     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1096     put_le16(p + 2, s->media_changed);		/* Media status */
1097     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1098     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1099     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1100     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1101 }
1102 
1103 static void ide_cfata_metadata_read(IDEState *s)
1104 {
1105     uint16_t *p;
1106 
1107     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1108         s->status = ERR_STAT;
1109         s->error = ABRT_ERR;
1110         return;
1111     }
1112 
1113     p = (uint16_t *) s->io_buffer;
1114     memset(p, 0, 0x200);
1115 
1116     put_le16(p + 0, s->media_changed);		/* Media status */
1117     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1118                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1119                                     s->nsector << 9), 0x200 - 2));
1120 }
1121 
1122 static void ide_cfata_metadata_write(IDEState *s)
1123 {
1124     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1125         s->status = ERR_STAT;
1126         s->error = ABRT_ERR;
1127         return;
1128     }
1129 
1130     s->media_changed = 0;
1131 
1132     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1133                     s->io_buffer + 2,
1134                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1135                                     s->nsector << 9), 0x200 - 2));
1136 }
1137 
1138 /* called when the inserted state of the media has changed */
1139 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1140 {
1141     IDEState *s = opaque;
1142     uint64_t nb_sectors;
1143 
1144     s->tray_open = !load;
1145     blk_get_geometry(s->blk, &nb_sectors);
1146     s->nb_sectors = nb_sectors;
1147 
1148     /*
1149      * First indicate to the guest that a CD has been removed.  That's
1150      * done on the next command the guest sends us.
1151      *
1152      * Then we set UNIT_ATTENTION, by which the guest will
1153      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1154      */
1155     s->cdrom_changed = 1;
1156     s->events.new_media = true;
1157     s->events.eject_request = false;
1158     ide_set_irq(s->bus);
1159 }
1160 
1161 static void ide_cd_eject_request_cb(void *opaque, bool force)
1162 {
1163     IDEState *s = opaque;
1164 
1165     s->events.eject_request = true;
1166     if (force) {
1167         s->tray_locked = false;
1168     }
1169     ide_set_irq(s->bus);
1170 }
1171 
1172 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1173 {
1174     s->lba48 = lba48;
1175 
1176     /* Handle the 'magic' nsector count of 0 here: to avoid fiddling
1177      * with the rest of the read logic, we just store the full sector
1178      * count in ->nsector and ignore ->hob_nsector from now on.
1179      */
1180     if (!s->lba48) {
1181         if (!s->nsector)
1182             s->nsector = 256;
1183     } else {
1184         if (!s->nsector && !s->hob_nsector)
1185             s->nsector = 65536;
1186         else {
1187             int lo = s->nsector;
1188             int hi = s->hob_nsector;
1189 
1190             s->nsector = (hi << 8) | lo;
1191         }
1192     }
1193 }
1194 
1195 static void ide_clear_hob(IDEBus *bus)
1196 {
1197     /* any write clears HOB high bit of device control register */
1198     bus->ifs[0].select &= ~(1 << 7);
1199     bus->ifs[1].select &= ~(1 << 7);
1200 }
1201 
1202 /* IOport [W]rite [R]egisters */
1203 enum ATA_IOPORT_WR {
1204     ATA_IOPORT_WR_DATA = 0,
1205     ATA_IOPORT_WR_FEATURES = 1,
1206     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1207     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1208     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1209     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1210     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1211     ATA_IOPORT_WR_COMMAND = 7,
1212     ATA_IOPORT_WR_NUM_REGISTERS,
1213 };
1214 
1215 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1216     [ATA_IOPORT_WR_DATA] = "Data",
1217     [ATA_IOPORT_WR_FEATURES] = "Features",
1218     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1219     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1220     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1221     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1222     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1223     [ATA_IOPORT_WR_COMMAND] = "Command"
1224 };
1225 
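/*
 * Handle a write to one of the ATA command block registers.  Values are
 * latched into both drives on the bus, with the previous contents saved
 * in the hob_* copies that LBA48 commands later combine with the current
 * values; a write to the command register dispatches ide_exec_cmd() for
 * the currently selected drive.
 */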
1226 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1227 {
1228     IDEBus *bus = opaque;
1229     IDEState *s = idebus_active_if(bus);
1230     int reg_num = addr & 7;
1231 
1232     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1233 
1234     /* ignore writes to command block while busy with previous command */
1235     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1236         return;
1237     }
1238 
1239     switch (reg_num) {
1240     case 0:
1241         break;
1242     case ATA_IOPORT_WR_FEATURES:
1243         ide_clear_hob(bus);
1244         /* NOTE: data is written to the two drives */
1245         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1246         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1247         bus->ifs[0].feature = val;
1248         bus->ifs[1].feature = val;
1249         break;
1250     case ATA_IOPORT_WR_SECTOR_COUNT:
1251         ide_clear_hob(bus);
1252         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1253         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1254         bus->ifs[0].nsector = val;
1255         bus->ifs[1].nsector = val;
1256         break;
1257     case ATA_IOPORT_WR_SECTOR_NUMBER:
1258         ide_clear_hob(bus);
1259         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1260         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1261         bus->ifs[0].sector = val;
1262         bus->ifs[1].sector = val;
1263         break;
1264     case ATA_IOPORT_WR_CYLINDER_LOW:
1265         ide_clear_hob(bus);
1266         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1267         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1268         bus->ifs[0].lcyl = val;
1269         bus->ifs[1].lcyl = val;
1270         break;
1271     case ATA_IOPORT_WR_CYLINDER_HIGH:
1272         ide_clear_hob(bus);
1273         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1274         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1275         bus->ifs[0].hcyl = val;
1276         bus->ifs[1].hcyl = val;
1277         break;
1278     case ATA_IOPORT_WR_DEVICE_HEAD:
1279         /* FIXME: HOB readback uses bit 7 */
1280         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1281         bus->ifs[1].select = (val | 0x10) | 0xa0;
1282         /* select drive */
1283         bus->unit = (val >> 4) & 1;
1284         break;
1285     default:
1286     case ATA_IOPORT_WR_COMMAND:
1287         /* command */
1288         ide_exec_cmd(bus, val);
1289         break;
1290     }
1291 }
1292 
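/*
 * Soft-reset one drive: cancel any in-flight PIO request, return the
 * task-file, ATAPI and DMA state to power-on defaults, re-assert the
 * device signature and install the dummy transfer handler so data port
 * reads return 0xffff until a new command starts a real transfer.
 */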
1293 static void ide_reset(IDEState *s)
1294 {
1295     trace_ide_reset(s);
1296 
1297     if (s->pio_aiocb) {
1298         blk_aio_cancel(s->pio_aiocb);
1299         s->pio_aiocb = NULL;
1300     }
1301 
1302     if (s->drive_kind == IDE_CFATA)
1303         s->mult_sectors = 0;
1304     else
1305         s->mult_sectors = MAX_MULT_SECTORS;
1306     /* ide regs */
1307     s->feature = 0;
1308     s->error = 0;
1309     s->nsector = 0;
1310     s->sector = 0;
1311     s->lcyl = 0;
1312     s->hcyl = 0;
1313 
1314     /* lba48 */
1315     s->hob_feature = 0;
1316     s->hob_sector = 0;
1317     s->hob_nsector = 0;
1318     s->hob_lcyl = 0;
1319     s->hob_hcyl = 0;
1320 
1321     s->select = 0xa0;
1322     s->status = READY_STAT | SEEK_STAT;
1323 
1324     s->lba48 = 0;
1325 
1326     /* ATAPI specific */
1327     s->sense_key = 0;
1328     s->asc = 0;
1329     s->cdrom_changed = 0;
1330     s->packet_transfer_size = 0;
1331     s->elementary_transfer_size = 0;
1332     s->io_buffer_index = 0;
1333     s->cd_sector_size = 0;
1334     s->atapi_dma = 0;
1335     s->tray_locked = 0;
1336     s->tray_open = 0;
1337     /* ATA DMA state */
1338     s->io_buffer_size = 0;
1339     s->req_nb_sectors = 0;
1340 
1341     ide_set_signature(s);
1342     /* init the transfer handler so that 0xffff is returned on data
1343        accesses */
1344     s->end_transfer_func = ide_dummy_transfer_stop;
1345     ide_dummy_transfer_stop(s);
1346     s->media_changed = 0;
1347 }
1348 
1349 static bool cmd_nop(IDEState *s, uint8_t cmd)
1350 {
1351     return true;
1352 }
1353 
1354 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1355 {
1356     /* Halt PIO (in the DRQ phase), then DMA */
1357     ide_transfer_cancel(s);
1358     ide_cancel_dma_sync(s);
1359 
1360     /* Reset any PIO commands, reset signature, etc */
1361     ide_reset(s);
1362 
1363     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1364      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1365     s->status = 0x00;
1366 
1367     /* Do not overwrite status register */
1368     return false;
1369 }
1370 
1371 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1372 {
1373     switch (s->feature) {
1374     case DSM_TRIM:
1375         if (s->blk) {
1376             ide_sector_start_dma(s, IDE_DMA_TRIM);
1377             return false;
1378         }
1379         break;
1380     }
1381 
1382     ide_abort_command(s);
1383     return true;
1384 }
1385 
1386 static bool cmd_identify(IDEState *s, uint8_t cmd)
1387 {
1388     if (s->blk && s->drive_kind != IDE_CD) {
1389         if (s->drive_kind != IDE_CFATA) {
1390             ide_identify(s);
1391         } else {
1392             ide_cfata_identify(s);
1393         }
1394         s->status = READY_STAT | SEEK_STAT;
1395         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1396         ide_set_irq(s->bus);
1397         return false;
1398     } else {
1399         if (s->drive_kind == IDE_CD) {
1400             ide_set_signature(s);
1401         }
1402         ide_abort_command(s);
1403     }
1404 
1405     return true;
1406 }
1407 
1408 static bool cmd_verify(IDEState *s, uint8_t cmd)
1409 {
1410     bool lba48 = (cmd == WIN_VERIFY_EXT);
1411 
1412     /* do sector number check ? */
1413     ide_cmd_lba48_transform(s, lba48);
1414 
1415     return true;
1416 }
1417 
1418 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1419 {
1420     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1421         /* Disable Read and Write Multiple */
1422         s->mult_sectors = 0;
1423     } else if ((s->nsector & 0xff) != 0 &&
1424         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1425          (s->nsector & (s->nsector - 1)) != 0)) {
1426         ide_abort_command(s);
1427     } else {
1428         s->mult_sectors = s->nsector & 0xff;
1429     }
1430 
1431     return true;
1432 }
1433 
1434 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1435 {
1436     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1437 
1438     if (!s->blk || !s->mult_sectors) {
1439         ide_abort_command(s);
1440         return true;
1441     }
1442 
1443     ide_cmd_lba48_transform(s, lba48);
1444     s->req_nb_sectors = s->mult_sectors;
1445     ide_sector_read(s);
1446     return false;
1447 }
1448 
1449 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1450 {
1451     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1452     int n;
1453 
1454     if (!s->blk || !s->mult_sectors) {
1455         ide_abort_command(s);
1456         return true;
1457     }
1458 
1459     ide_cmd_lba48_transform(s, lba48);
1460 
1461     s->req_nb_sectors = s->mult_sectors;
1462     n = MIN(s->nsector, s->req_nb_sectors);
1463 
1464     s->status = SEEK_STAT | READY_STAT;
1465     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1466 
1467     s->media_changed = 1;
1468 
1469     return false;
1470 }
1471 
1472 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1473 {
1474     bool lba48 = (cmd == WIN_READ_EXT);
1475 
1476     if (s->drive_kind == IDE_CD) {
1477         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1478         ide_abort_command(s);
1479         return true;
1480     }
1481 
1482     if (!s->blk) {
1483         ide_abort_command(s);
1484         return true;
1485     }
1486 
1487     ide_cmd_lba48_transform(s, lba48);
1488     s->req_nb_sectors = 1;
1489     ide_sector_read(s);
1490 
1491     return false;
1492 }
1493 
1494 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1495 {
1496     bool lba48 = (cmd == WIN_WRITE_EXT);
1497 
1498     if (!s->blk) {
1499         ide_abort_command(s);
1500         return true;
1501     }
1502 
1503     ide_cmd_lba48_transform(s, lba48);
1504 
1505     s->req_nb_sectors = 1;
1506     s->status = SEEK_STAT | READY_STAT;
1507     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1508 
1509     s->media_changed = 1;
1510 
1511     return false;
1512 }
1513 
1514 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1515 {
1516     bool lba48 = (cmd == WIN_READDMA_EXT);
1517 
1518     if (!s->blk) {
1519         ide_abort_command(s);
1520         return true;
1521     }
1522 
1523     ide_cmd_lba48_transform(s, lba48);
1524     ide_sector_start_dma(s, IDE_DMA_READ);
1525 
1526     return false;
1527 }
1528 
1529 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1530 {
1531     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1532 
1533     if (!s->blk) {
1534         ide_abort_command(s);
1535         return true;
1536     }
1537 
1538     ide_cmd_lba48_transform(s, lba48);
1539     ide_sector_start_dma(s, IDE_DMA_WRITE);
1540 
1541     s->media_changed = 1;
1542 
1543     return false;
1544 }
1545 
1546 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1547 {
1548     ide_flush_cache(s);
1549     return false;
1550 }
1551 
1552 static bool cmd_seek(IDEState *s, uint8_t cmd)
1553 {
1554     /* XXX: Check that seek is within bounds */
1555     return true;
1556 }
1557 
1558 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1559 {
1560     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1561 
1562     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1563     if (s->nb_sectors == 0) {
1564         ide_abort_command(s);
1565         return true;
1566     }
1567 
1568     ide_cmd_lba48_transform(s, lba48);
1569     ide_set_sector(s, s->nb_sectors - 1);
1570 
1571     return true;
1572 }
1573 
1574 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1575 {
1576     s->nsector = 0xff; /* device active or idle */
1577     return true;
1578 }
1579 
1580 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1581 {
1582     uint16_t *identify_data;
1583 
1584     if (!s->blk) {
1585         ide_abort_command(s);
1586         return true;
1587     }
1588 
1589     /* XXX: valid for CDROM ? */
1590     switch (s->feature) {
1591     case 0x02: /* write cache enable */
1592         blk_set_enable_write_cache(s->blk, true);
1593         identify_data = (uint16_t *)s->identify_data;
1594         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1595         return true;
1596     case 0x82: /* write cache disable */
1597         blk_set_enable_write_cache(s->blk, false);
1598         identify_data = (uint16_t *)s->identify_data;
1599         put_le16(identify_data + 85, (1 << 14) | 1);
1600         ide_flush_cache(s);
1601         return false;
1602     case 0xcc: /* reverting to power-on defaults enable */
1603     case 0x66: /* reverting to power-on defaults disable */
1604     case 0xaa: /* read look-ahead enable */
1605     case 0x55: /* read look-ahead disable */
1606     case 0x05: /* set advanced power management mode */
1607     case 0x85: /* disable advanced power management mode */
1608     case 0x69: /* NOP */
1609     case 0x67: /* NOP */
1610     case 0x96: /* NOP */
1611     case 0x9a: /* NOP */
1612     case 0x42: /* enable Automatic Acoustic Mode */
1613     case 0xc2: /* disable Automatic Acoustic Mode */
1614         return true;
1615     case 0x03: /* set transfer mode */
1616         {
1617             uint8_t val = s->nsector & 0x07;
1618             identify_data = (uint16_t *)s->identify_data;
1619 
1620             switch (s->nsector >> 3) {
1621             case 0x00: /* pio default */
1622             case 0x01: /* pio mode */
1623                 put_le16(identify_data + 62, 0x07);
1624                 put_le16(identify_data + 63, 0x07);
1625                 put_le16(identify_data + 88, 0x3f);
1626                 break;
1627             case 0x02: /* single word dma mode */
1628                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1629                 put_le16(identify_data + 63, 0x07);
1630                 put_le16(identify_data + 88, 0x3f);
1631                 break;
1632             case 0x04: /* mdma mode */
1633                 put_le16(identify_data + 62, 0x07);
1634                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1635                 put_le16(identify_data + 88, 0x3f);
1636                 break;
1637             case 0x08: /* udma mode */
1638                 put_le16(identify_data + 62, 0x07);
1639                 put_le16(identify_data + 63, 0x07);
1640                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1641                 break;
1642             default:
1643                 goto abort_cmd;
1644             }
1645             return true;
1646         }
1647     }
1648 
1649 abort_cmd:
1650     ide_abort_command(s);
1651     return true;
1652 }
1653 
1654 
1655 /*** ATAPI commands ***/
1656 
1657 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1658 {
1659     ide_atapi_identify(s);
1660     s->status = READY_STAT | SEEK_STAT;
1661     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1662     ide_set_irq(s->bus);
1663     return false;
1664 }
1665 
1666 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1667 {
1668     ide_set_signature(s);
1669 
1670     if (s->drive_kind == IDE_CD) {
1671         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1672                         * devices to return a clear status register
1673                         * with READY_STAT *not* set. */
1674         s->error = 0x01;
1675     } else {
1676         s->status = READY_STAT | SEEK_STAT;
1677         /* The bits of the error register are not as usual for this command!
1678          * They are part of the regular output (this is why ERR_STAT isn't set).
1679          * Device 0 passed, Device 1 passed or not present. */
1680         s->error = 0x01;
1681         ide_set_irq(s->bus);
1682     }
1683 
1684     return false;
1685 }
1686 
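/*
 * ATA PACKET command: set up a PIO transfer of the ATAPI_PACKET_SIZE
 * command packet into io_buffer and let ide_atapi_cmd() parse it once the
 * guest has written it.  Bit 0 of the feature register selects DMA for
 * the subsequent data phase.
 */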
1687 static bool cmd_packet(IDEState *s, uint8_t cmd)
1688 {
1689     /* overlapping commands not supported */
1690     if (s->feature & 0x02) {
1691         ide_abort_command(s);
1692         return true;
1693     }
1694 
1695     s->status = READY_STAT | SEEK_STAT;
1696     s->atapi_dma = s->feature & 1;
1697     if (s->atapi_dma) {
1698         s->dma_cmd = IDE_DMA_ATAPI;
1699     }
1700     s->nsector = 1;
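    /* The guest now writes the ATAPI command packet via PIO;
     * ide_atapi_cmd() runs once the full packet has been transferred. */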
1701     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1702                        ide_atapi_cmd);
1703     return false;
1704 }
1705 
1706 
1707 /*** CF-ATA commands ***/
1708 
1709 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1710 {
1711     s->error = 0x09;    /* miscellaneous error */
1712     s->status = READY_STAT | SEEK_STAT;
1713     ide_set_irq(s->bus);
1714 
1715     return false;
1716 }
1717 
1718 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1719 {
1720     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1721      * required for Windows 8 to work with AHCI */
1722 
1723     if (cmd == CFA_WEAR_LEVEL) {
1724         s->nsector = 0;
1725     }
1726 
1727     if (cmd == CFA_ERASE_SECTORS) {
1728         s->media_changed = 1;
1729     }
1730 
1731     return true;
1732 }
1733 
1734 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1735 {
1736     s->status = READY_STAT | SEEK_STAT;
1737 
1738     memset(s->io_buffer, 0, 0x200);
1739     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1740     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1741     s->io_buffer[0x02] = s->select;                 /* Head */
1742     s->io_buffer[0x03] = s->sector;                 /* Sector */
1743     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1744     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1745     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1746     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1747     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1748     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1749     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1750 
1751     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1752     ide_set_irq(s->bus);
1753 
1754     return false;
1755 }
1756 
1757 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1758 {
1759     switch (s->feature) {
1760     case 0x02:  /* Inquiry Metadata Storage */
1761         ide_cfata_metadata_inquiry(s);
1762         break;
1763     case 0x03:  /* Read Metadata Storage */
1764         ide_cfata_metadata_read(s);
1765         break;
1766     case 0x04:  /* Write Metadata Storage */
1767         ide_cfata_metadata_write(s);
1768         break;
1769     default:
1770         ide_abort_command(s);
1771         return true;
1772     }
1773 
1774     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1775     s->status = 0x00; /* NOTE: READY is _not_ set */
1776     ide_set_irq(s->bus);
1777 
1778     return false;
1779 }
1780 
1781 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1782 {
1783     switch (s->feature) {
1784     case 0x01:  /* sense temperature in device */
1785         s->nsector = 0x50;      /* +20 C */
1786         break;
1787     default:
1788         ide_abort_command(s);
1789         return true;
1790     }
1791 
1792     return true;
1793 }
1794 
1795 
1796 /*** SMART commands ***/
1797 
1798 static bool cmd_smart(IDEState *s, uint8_t cmd)
1799 {
1800     int n;
1801 
1802     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1803         goto abort_cmd;
1804     }
1805 
1806     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1807         goto abort_cmd;
1808     }
1809 
1810     switch (s->feature) {
1811     case SMART_DISABLE:
1812         s->smart_enabled = 0;
1813         return true;
1814 
1815     case SMART_ENABLE:
1816         s->smart_enabled = 1;
1817         return true;
1818 
1819     case SMART_ATTR_AUTOSAVE:
1820         switch (s->sector) {
1821         case 0x00:
1822             s->smart_autosave = 0;
1823             break;
1824         case 0xf1:
1825             s->smart_autosave = 1;
1826             break;
1827         default:
1828             goto abort_cmd;
1829         }
1830         return true;
1831 
1832     case SMART_STATUS:
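        /* SMART RETURN STATUS reports its result through the Cylinder
         * registers: the 0xc2/0x4f signature means no thresholds were
         * exceeded, 0x2c/0xf4 signals a threshold-exceeded condition. */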
1833         if (!s->smart_errors) {
1834             s->hcyl = 0xc2;
1835             s->lcyl = 0x4f;
1836         } else {
1837             s->hcyl = 0x2c;
1838             s->lcyl = 0xf4;
1839         }
1840         return true;
1841 
1842     case SMART_READ_THRESH:
1843         memset(s->io_buffer, 0, 0x200);
1844         s->io_buffer[0] = 0x01; /* smart struct version */
1845 
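        /* Each 12-byte threshold entry holds the attribute ID followed by
         * its threshold value (the last byte of the smart_attributes row). */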
1846         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1847             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1848             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1849         }
1850 
1851         /* checksum: two's complement of the byte sum, so all 512 bytes sum to zero (mod 256) */
1852         for (n = 0; n < 511; n++) {
1853             s->io_buffer[511] += s->io_buffer[n];
1854         }
1855         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1856 
1857         s->status = READY_STAT | SEEK_STAT;
1858         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1859         ide_set_irq(s->bus);
1860         return false;
1861 
1862     case SMART_READ_DATA:
1863         memset(s->io_buffer, 0, 0x200);
1864         s->io_buffer[0] = 0x01; /* smart struct version */
1865 
1866         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1867             int i;
1868             for (i = 0; i < 11; i++) {
1869                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1870             }
1871         }
1872 
1873         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1874         if (s->smart_selftest_count == 0) {
1875             s->io_buffer[363] = 0;
1876         } else {
1877             s->io_buffer[363] =
1878                 s->smart_selftest_data[3 +
1879                            (s->smart_selftest_count - 1) *
1880                            24];
1881         }
1882         s->io_buffer[364] = 0x20;
1883         s->io_buffer[365] = 0x01;
1884         /* offline data collection capacity: execute + self-test */
1885         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1886         s->io_buffer[368] = 0x03; /* smart capability (1) */
1887         s->io_buffer[369] = 0x00; /* smart capability (2) */
1888         s->io_buffer[370] = 0x01; /* error logging supported */
1889         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1890         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1891         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1892 
1893         for (n = 0; n < 511; n++) {
1894             s->io_buffer[511] += s->io_buffer[n];
1895         }
1896         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1897 
1898         s->status = READY_STAT | SEEK_STAT;
1899         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1900         ide_set_irq(s->bus);
1901         return false;
1902 
1903     case SMART_READ_LOG:
1904         switch (s->sector) {
1905         case 0x01: /* summary smart error log */
1906             memset(s->io_buffer, 0, 0x200);
1907             s->io_buffer[0] = 0x01;
1908             s->io_buffer[1] = 0x00; /* no error entries */
1909             s->io_buffer[452] = s->smart_errors & 0xff;
1910             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1911 
1912             for (n = 0; n < 511; n++) {
1913                 s->io_buffer[511] += s->io_buffer[n];
1914             }
1915             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1916             break;
1917         case 0x06: /* smart self test log */
1918             memset(s->io_buffer, 0, 0x200);
1919             s->io_buffer[0] = 0x01;
1920             if (s->smart_selftest_count == 0) {
1921                 s->io_buffer[508] = 0;
1922             } else {
1923                 s->io_buffer[508] = s->smart_selftest_count;
1924                 for (n = 2; n < 506; n++)  {
1925                     s->io_buffer[n] = s->smart_selftest_data[n];
1926                 }
1927             }
1928 
1929             for (n = 0; n < 511; n++) {
1930                 s->io_buffer[511] += s->io_buffer[n];
1931             }
1932             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1933             break;
1934         default:
1935             goto abort_cmd;
1936         }
1937         s->status = READY_STAT | SEEK_STAT;
1938         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1939         ide_set_irq(s->bus);
1940         return false;
1941 
1942     case SMART_EXECUTE_OFFLINE:
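        /* Record the requested self-test in smart_selftest_data: one
         * 24-byte entry per run, with the slot counter wrapping back to 1
         * after 21 entries. */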
1943         switch (s->sector) {
1944         case 0: /* off-line routine */
1945         case 1: /* short self test */
1946         case 2: /* extended self test */
1947             s->smart_selftest_count++;
1948             if (s->smart_selftest_count > 21) {
1949                 s->smart_selftest_count = 1;
1950             }
1951             n = 2 + (s->smart_selftest_count - 1) * 24;
1952             s->smart_selftest_data[n] = s->sector;
1953             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1954             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1955             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1956             break;
1957         default:
1958             goto abort_cmd;
1959         }
1960         return true;
1961     }
1962 
1963 abort_cmd:
1964     ide_abort_command(s);
1965     return true;
1966 }
1967 
1968 #define HD_OK (1u << IDE_HD)
1969 #define CD_OK (1u << IDE_CD)
1970 #define CFA_OK (1u << IDE_CFATA)
1971 #define HD_CFA_OK (HD_OK | CFA_OK)
1972 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1973 
1974 /* Set the Disk Seek Completed status bit during completion */
1975 #define SET_DSC (1u << 8)
1976 
1977 /* See ACS-2 T13/2015-D Table B.2 Command codes */
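/* The low bits of .flags form a per-drive-kind permission mask checked by
 * ide_cmd_permitted(); SET_DSC makes ide_exec_cmd() raise SEEK_STAT once the
 * command completes without error. */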
1978 static const struct {
1979     /* Returns true if the completion code should be run */
1980     bool (*handler)(IDEState *s, uint8_t cmd);
1981     int flags;
1982 } ide_cmd_table[0x100] = {
1983     /* NOP not implemented, mandatory for CD */
1984     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1985     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1986     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1987     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1988     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1989     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1990     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1991     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1992     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1993     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1994     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1995     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1996     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1997     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1998     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1999     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2000     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2001     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2002     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2003     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2004     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2005     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2006     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2007     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2008     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2009     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2010     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2011     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2012     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2013     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2014     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2015     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2016     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2017     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2018     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2019     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2020     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2021     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2022     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2023     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2024     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2025     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2026     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2027     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2028     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2029     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2030     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2031     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2032     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2033     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2034     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2035     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2036     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2037     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2038     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2039     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2040 };
2041 
2042 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2043 {
2044     return cmd < ARRAY_SIZE(ide_cmd_table)
2045         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2046 }
2047 
2048 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2049 {
2050     IDEState *s;
2051     bool complete;
2052 
2053     s = idebus_active_if(bus);
2054     trace_ide_exec_cmd(bus, s, val);
2055 
2056     /* ignore commands to a non-existent slave */
2057     if (s != bus->ifs && !s->blk) {
2058         return;
2059     }
2060 
2061     /* Only RESET is allowed while BSY and/or DRQ are set,
2062      * and only to ATAPI devices. */
2063     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2064         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2065             return;
2066         }
2067     }
2068 
2069     if (!ide_cmd_permitted(s, val)) {
2070         ide_abort_command(s);
2071         ide_set_irq(s->bus);
2072         return;
2073     }
2074 
2075     s->status = READY_STAT | BUSY_STAT;
2076     s->error = 0;
2077     s->io_buffer_offset = 0;
2078 
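    /* A handler returns true when the command has fully completed here;
     * commands that return false finish later through their own PIO or DMA
     * completion paths. */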
2079     complete = ide_cmd_table[val].handler(s, val);
2080     if (complete) {
2081         s->status &= ~BUSY_STAT;
2082         assert(!!s->error == !!(s->status & ERR_STAT));
2083 
2084         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2085             s->status |= SEEK_STAT;
2086         }
2087 
2088         ide_cmd_done(s);
2089         ide_set_irq(s->bus);
2090     }
2091 }
2092 
2093 /* IOport [R]ead [R]egisters */
2094 enum ATA_IOPORT_RR {
2095     ATA_IOPORT_RR_DATA = 0,
2096     ATA_IOPORT_RR_ERROR = 1,
2097     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2098     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2099     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2100     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2101     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2102     ATA_IOPORT_RR_STATUS = 7,
2103     ATA_IOPORT_RR_NUM_REGISTERS,
2104 };
2105 
2106 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2107     [ATA_IOPORT_RR_DATA] = "Data",
2108     [ATA_IOPORT_RR_ERROR] = "Error",
2109     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2110     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2111     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2112     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2113     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2114     [ATA_IOPORT_RR_STATUS] = "Status"
2115 };
2116 
2117 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2118 {
2119     IDEBus *bus = opaque;
2120     IDEState *s = idebus_active_if(bus);
2121     uint32_t reg_num;
2122     int ret, hob;
2123 
2124     reg_num = addr & 7;
2125     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2126     //hob = s->select & (1 << 7);
2127     hob = 0;
2128     switch (reg_num) {
2129     case ATA_IOPORT_RR_DATA:
2130         ret = 0xff;
2131         break;
2132     case ATA_IOPORT_RR_ERROR:
2133         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2134             (s != bus->ifs && !s->blk)) {
2135             ret = 0;
2136         } else if (!hob) {
2137             ret = s->error;
2138         } else {
2139             ret = s->hob_feature;
2140         }
2141         break;
2142     case ATA_IOPORT_RR_SECTOR_COUNT:
2143         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2144             ret = 0;
2145         } else if (!hob) {
2146             ret = s->nsector & 0xff;
2147         } else {
2148             ret = s->hob_nsector;
2149         }
2150         break;
2151     case ATA_IOPORT_RR_SECTOR_NUMBER:
2152         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2153             ret = 0;
2154         } else if (!hob) {
2155             ret = s->sector;
2156         } else {
2157             ret = s->hob_sector;
2158         }
2159         break;
2160     case ATA_IOPORT_RR_CYLINDER_LOW:
2161         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2162             ret = 0;
2163         } else if (!hob) {
2164             ret = s->lcyl;
2165         } else {
2166             ret = s->hob_lcyl;
2167         }
2168         break;
2169     case ATA_IOPORT_RR_CYLINDER_HIGH:
2170         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2171             ret = 0;
2172         } else if (!hob) {
2173             ret = s->hcyl;
2174         } else {
2175             ret = s->hob_hcyl;
2176         }
2177         break;
2178     case ATA_IOPORT_RR_DEVICE_HEAD:
2179         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2180             ret = 0;
2181         } else {
2182             ret = s->select;
2183         }
2184         break;
2185     default:
2186     case ATA_IOPORT_RR_STATUS:
2187         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2188             (s != bus->ifs && !s->blk)) {
2189             ret = 0;
2190         } else {
2191             ret = s->status;
2192         }
2193         qemu_irq_lower(bus->irq);
2194         break;
2195     }
2196 
2197     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2198     return ret;
2199 }
2200 
2201 uint32_t ide_status_read(void *opaque, uint32_t addr)
2202 {
2203     IDEBus *bus = opaque;
2204     IDEState *s = idebus_active_if(bus);
2205     int ret;
2206 
2207     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2208         (s != bus->ifs && !s->blk)) {
2209         ret = 0;
2210     } else {
2211         ret = s->status;
2212     }
2213 
2214     trace_ide_status_read(addr, ret, bus, s);
2215     return ret;
2216 }
2217 
2218 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2219 {
2220     IDEBus *bus = opaque;
2221     IDEState *s;
2222     int i;
2223 
2224     trace_ide_cmd_write(addr, val, bus);
2225 
2226     /* common for both drives */
2227     if (!(bus->cmd & IDE_CMD_RESET) &&
2228         (val & IDE_CMD_RESET)) {
2229         /* reset low to high */
2230         for (i = 0; i < 2; i++) {
2231             s = &bus->ifs[i];
2232             s->status = BUSY_STAT | SEEK_STAT;
2233             s->error = 0x01;
2234         }
2235     } else if ((bus->cmd & IDE_CMD_RESET) &&
2236                !(val & IDE_CMD_RESET)) {
2237         /* high to low */
2238         for (i = 0; i < 2; i++) {
2239             s = &bus->ifs[i];
2240             if (s->drive_kind == IDE_CD)
2241                 s->status = 0x00; /* NOTE: READY is _not_ set */
2242             else
2243                 s->status = READY_STAT | SEEK_STAT;
2244             ide_set_signature(s);
2245         }
2246     }
2247 
2248     bus->cmd = val;
2249 }
2250 
2251 /*
2252  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2253  * transferred from the device to the guest), false if it's a PIO in
2254  */
2255 static bool ide_is_pio_out(IDEState *s)
2256 {
2257     if (s->end_transfer_func == ide_sector_write ||
2258         s->end_transfer_func == ide_atapi_cmd) {
2259         return false;
2260     } else if (s->end_transfer_func == ide_sector_read ||
2261                s->end_transfer_func == ide_transfer_stop ||
2262                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2263                s->end_transfer_func == ide_dummy_transfer_stop) {
2264         return true;
2265     }
2266 
2267     abort();
2268 }
2269 
2270 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2271 {
2272     IDEBus *bus = opaque;
2273     IDEState *s = idebus_active_if(bus);
2274     uint8_t *p;
2275 
2276     trace_ide_data_writew(addr, val, bus, s);
2277 
2278     /* PIO data access allowed only when DRQ bit is set. The result of a write
2279      * during PIO out is indeterminate, just ignore it. */
2280     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2281         return;
2282     }
2283 
2284     p = s->data_ptr;
2285     if (p + 2 > s->data_end) {
2286         return;
2287     }
2288 
2289     *(uint16_t *)p = le16_to_cpu(val);
2290     p += 2;
2291     s->data_ptr = p;
2292     if (p >= s->data_end) {
2293         s->status &= ~DRQ_STAT;
2294         s->end_transfer_func(s);
2295     }
2296 }
2297 
2298 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2299 {
2300     IDEBus *bus = opaque;
2301     IDEState *s = idebus_active_if(bus);
2302     uint8_t *p;
2303     int ret;
2304 
2305     /* PIO data access allowed only when DRQ bit is set. The result of a read
2306      * during PIO in is indeterminate, return 0 and don't move forward. */
2307     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2308         return 0;
2309     }
2310 
2311     p = s->data_ptr;
2312     if (p + 2 > s->data_end) {
2313         return 0;
2314     }
2315 
2316     ret = cpu_to_le16(*(uint16_t *)p);
2317     p += 2;
2318     s->data_ptr = p;
2319     if (p >= s->data_end) {
2320         s->status &= ~DRQ_STAT;
2321         s->end_transfer_func(s);
2322     }
2323 
2324     trace_ide_data_readw(addr, ret, bus, s);
2325     return ret;
2326 }
2327 
2328 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2329 {
2330     IDEBus *bus = opaque;
2331     IDEState *s = idebus_active_if(bus);
2332     uint8_t *p;
2333 
2334     trace_ide_data_writel(addr, val, bus, s);
2335 
2336     /* PIO data access allowed only when DRQ bit is set. The result of a write
2337      * during PIO out is indeterminate, just ignore it. */
2338     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2339         return;
2340     }
2341 
2342     p = s->data_ptr;
2343     if (p + 4 > s->data_end) {
2344         return;
2345     }
2346 
2347     *(uint32_t *)p = le32_to_cpu(val);
2348     p += 4;
2349     s->data_ptr = p;
2350     if (p >= s->data_end) {
2351         s->status &= ~DRQ_STAT;
2352         s->end_transfer_func(s);
2353     }
2354 }
2355 
2356 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2357 {
2358     IDEBus *bus = opaque;
2359     IDEState *s = idebus_active_if(bus);
2360     uint8_t *p;
2361     int ret;
2362 
2363     /* PIO data access allowed only when DRQ bit is set. The result of a read
2364      * during PIO in is indeterminate, return 0 and don't move forward. */
2365     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2366         ret = 0;
2367         goto out;
2368     }
2369 
2370     p = s->data_ptr;
2371     if (p + 4 > s->data_end) {
2372         return 0;
2373     }
2374 
2375     ret = cpu_to_le32(*(uint32_t *)p);
2376     p += 4;
2377     s->data_ptr = p;
2378     if (p >= s->data_end) {
2379         s->status &= ~DRQ_STAT;
2380         s->end_transfer_func(s);
2381     }
2382 
2383 out:
2384     trace_ide_data_readl(addr, ret, bus, s);
2385     return ret;
2386 }
2387 
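/* Dummy end_transfer_func: reset the PIO data window to an empty buffer and
 * leave an all-ones pattern in its first four bytes. */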
2388 static void ide_dummy_transfer_stop(IDEState *s)
2389 {
2390     s->data_ptr = s->io_buffer;
2391     s->data_end = s->io_buffer;
2392     s->io_buffer[0] = 0xff;
2393     s->io_buffer[1] = 0xff;
2394     s->io_buffer[2] = 0xff;
2395     s->io_buffer[3] = 0xff;
2396 }
2397 
2398 void ide_bus_reset(IDEBus *bus)
2399 {
2400     bus->unit = 0;
2401     bus->cmd = 0;
2402     ide_reset(&bus->ifs[0]);
2403     ide_reset(&bus->ifs[1]);
2404     ide_clear_hob(bus);
2405 
2406     /* pending async DMA */
2407     if (bus->dma->aiocb) {
2408         trace_ide_bus_reset_aio();
2409         blk_aio_cancel(bus->dma->aiocb);
2410         bus->dma->aiocb = NULL;
2411     }
2412 
2413     /* reset dma provider too */
2414     if (bus->dma->ops->reset) {
2415         bus->dma->ops->reset(bus->dma);
2416     }
2417 }
2418 
2419 static bool ide_cd_is_tray_open(void *opaque)
2420 {
2421     return ((IDEState *)opaque)->tray_open;
2422 }
2423 
2424 static bool ide_cd_is_medium_locked(void *opaque)
2425 {
2426     return ((IDEState *)opaque)->tray_locked;
2427 }
2428 
2429 static void ide_resize_cb(void *opaque)
2430 {
2431     IDEState *s = opaque;
2432     uint64_t nb_sectors;
2433 
2434     if (!s->identify_set) {
2435         return;
2436     }
2437 
2438     blk_get_geometry(s->blk, &nb_sectors);
2439     s->nb_sectors = nb_sectors;
2440 
2441     /* Update the identify data buffer. */
2442     if (s->drive_kind == IDE_CFATA) {
2443         ide_cfata_identify_size(s);
2444     } else {
2445         /* IDE_CD uses a different set of callbacks entirely. */
2446         assert(s->drive_kind != IDE_CD);
2447         ide_identify_size(s);
2448     }
2449 }
2450 
2451 static const BlockDevOps ide_cd_block_ops = {
2452     .change_media_cb = ide_cd_change_cb,
2453     .eject_request_cb = ide_cd_eject_request_cb,
2454     .is_tray_open = ide_cd_is_tray_open,
2455     .is_medium_locked = ide_cd_is_medium_locked,
2456 };
2457 
2458 static const BlockDevOps ide_hd_block_ops = {
2459     .resize_cb = ide_resize_cb,
2460 };
2461 
2462 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2463                    const char *version, const char *serial, const char *model,
2464                    uint64_t wwn,
2465                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2466                    int chs_trans, Error **errp)
2467 {
2468     uint64_t nb_sectors;
2469 
2470     s->blk = blk;
2471     s->drive_kind = kind;
2472 
2473     blk_get_geometry(blk, &nb_sectors);
2474     s->cylinders = cylinders;
2475     s->heads = heads;
2476     s->sectors = secs;
2477     s->chs_trans = chs_trans;
2478     s->nb_sectors = nb_sectors;
2479     s->wwn = wwn;
2480     /* The SMART values should be preserved across power cycles
2481        but they aren't.  */
2482     s->smart_enabled = 1;
2483     s->smart_autosave = 1;
2484     s->smart_errors = 0;
2485     s->smart_selftest_count = 0;
2486     if (kind == IDE_CD) {
2487         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2488         blk_set_guest_block_size(blk, 2048);
2489     } else {
2490         if (!blk_is_inserted(s->blk)) {
2491             error_setg(errp, "Device needs media, but drive is empty");
2492             return -1;
2493         }
2494         if (blk_is_read_only(blk)) {
2495             error_setg(errp, "Can't use a read-only drive");
2496             return -1;
2497         }
2498         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2499     }
2500     if (serial) {
2501         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2502     } else {
2503         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2504                  "QM%05d", s->drive_serial);
2505     }
2506     if (model) {
2507         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2508     } else {
2509         switch (kind) {
2510         case IDE_CD:
2511             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2512             break;
2513         case IDE_CFATA:
2514             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2515             break;
2516         default:
2517             strcpy(s->drive_model_str, "QEMU HARDDISK");
2518             break;
2519         }
2520     }
2521 
2522     if (version) {
2523         pstrcpy(s->version, sizeof(s->version), version);
2524     } else {
2525         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2526     }
2527 
2528     ide_reset(s);
2529     blk_iostatus_enable(blk);
2530     return 0;
2531 }
2532 
2533 static void ide_init1(IDEBus *bus, int unit)
2534 {
2535     static int drive_serial = 1;
2536     IDEState *s = &bus->ifs[unit];
2537 
2538     s->bus = bus;
2539     s->unit = unit;
2540     s->drive_serial = drive_serial++;
2541     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2542     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2543     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2544     memset(s->io_buffer, 0, s->io_buffer_total_len);
2545 
2546     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2547     memset(s->smart_selftest_data, 0, 512);
2548 
2549     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2550                                            ide_sector_write_timer_cb, s);
2551 }
2552 
2553 static int ide_nop_int(IDEDMA *dma, int x)
2554 {
2555     return 0;
2556 }
2557 
2558 static void ide_nop(IDEDMA *dma)
2559 {
2560 }
2561 
2562 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2563 {
2564     return 0;
2565 }
2566 
2567 static const IDEDMAOps ide_dma_nop_ops = {
2568     .prepare_buf    = ide_nop_int32,
2569     .restart_dma    = ide_nop,
2570     .rw_buf         = ide_nop_int,
2571 };
2572 
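/* Resume an interrupted DMA request: restore the unit, sector number and
 * sector count saved on the bus for retry, re-arm the HBA's DMA engine and
 * restart the transfer from the beginning of the buffer. */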
2573 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2574 {
2575     s->unit = s->bus->retry_unit;
2576     ide_set_sector(s, s->bus->retry_sector_num);
2577     s->nsector = s->bus->retry_nsector;
2578     s->bus->dma->ops->restart_dma(s->bus->dma);
2579     s->io_buffer_size = 0;
2580     s->dma_cmd = dma_cmd;
2581     ide_start_dma(s, ide_dma_cb);
2582 }
2583 
2584 static void ide_restart_bh(void *opaque)
2585 {
2586     IDEBus *bus = opaque;
2587     IDEState *s;
2588     bool is_read;
2589     int error_status;
2590 
2591     qemu_bh_delete(bus->bh);
2592     bus->bh = NULL;
2593 
2594     error_status = bus->error_status;
2595     if (bus->error_status == 0) {
2596         return;
2597     }
2598 
2599     s = idebus_active_if(bus);
2600     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2601 
2602     /* The error status must be cleared before resubmitting the request: The
2603      * request may fail again, and this case can only be distinguished if the
2604      * called function can set a new error status. */
2605     bus->error_status = 0;
2606 
2607     /* The HBA has generically asked to be kicked on retry */
2608     if (error_status & IDE_RETRY_HBA) {
2609         if (s->bus->dma->ops->restart) {
2610             s->bus->dma->ops->restart(s->bus->dma);
2611         }
2612     } else if (IS_IDE_RETRY_DMA(error_status)) {
2613         if (error_status & IDE_RETRY_TRIM) {
2614             ide_restart_dma(s, IDE_DMA_TRIM);
2615         } else {
2616             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2617         }
2618     } else if (IS_IDE_RETRY_PIO(error_status)) {
2619         if (is_read) {
2620             ide_sector_read(s);
2621         } else {
2622             ide_sector_write(s);
2623         }
2624     } else if (error_status & IDE_RETRY_FLUSH) {
2625         ide_flush_cache(s);
2626     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2627         assert(s->end_transfer_func == ide_atapi_cmd);
2628         ide_atapi_dma_restart(s);
2629     } else {
2630         abort();
2631     }
2632 }
2633 
2634 static void ide_restart_cb(void *opaque, int running, RunState state)
2635 {
2636     IDEBus *bus = opaque;
2637 
2638     if (!running)
2639         return;
2640 
2641     if (!bus->bh) {
2642         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2643         qemu_bh_schedule(bus->bh);
2644     }
2645 }
2646 
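/* Hook the bus up to VM run-state changes so that requests which failed with
 * a retryable error are resubmitted (via ide_restart_bh) when the VM resumes
 * execution. */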
2647 void ide_register_restart_cb(IDEBus *bus)
2648 {
2649     if (bus->dma->ops->restart_dma) {
2650         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2651     }
2652 }
2653 
2654 static IDEDMA ide_dma_nop = {
2655     .ops = &ide_dma_nop_ops,
2656     .aiocb = NULL,
2657 };
2658 
2659 void ide_init2(IDEBus *bus, qemu_irq irq)
2660 {
2661     int i;
2662 
2663     for (i = 0; i < 2; i++) {
2664         ide_init1(bus, i);
2665         ide_reset(&bus->ifs[i]);
2666     }
2667     bus->irq = irq;
2668     bus->dma = &ide_dma_nop;
2669 }
2670 
2671 void ide_exit(IDEState *s)
2672 {
2673     timer_del(s->sector_write_timer);
2674     timer_free(s->sector_write_timer);
2675     qemu_vfree(s->smart_selftest_data);
2676     qemu_vfree(s->io_buffer);
2677 }
2678 
2679 static const MemoryRegionPortio ide_portio_list[] = {
2680     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2681     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2682     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2683     PORTIO_END_OF_LIST(),
2684 };
2685 
2686 static const MemoryRegionPortio ide_portio2_list[] = {
2687     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2688     PORTIO_END_OF_LIST(),
2689 };
2690 
2691 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2692 {
2693     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2694        bridge has been set up properly to always register with ISA. */
2695     isa_register_portio_list(dev, &bus->portio_list,
2696                              iobase, ide_portio_list, bus, "ide");
2697 
2698     if (iobase2) {
2699         isa_register_portio_list(dev, &bus->portio2_list,
2700                                  iobase2, ide_portio2_list, bus, "ide");
2701     }
2702 }
2703 
2704 static bool is_identify_set(void *opaque, int version_id)
2705 {
2706     IDEState *s = opaque;
2707 
2708     return s->identify_set != 0;
2709 }
2710 
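/* end_transfer_func cannot be migrated as a raw pointer, so the PIO vmstate
 * stores an index into this table instead (see ide_drive_pio_pre_save() and
 * ide_drive_pio_post_load()). */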
2711 static EndTransferFunc* transfer_end_table[] = {
2712         ide_sector_read,
2713         ide_sector_write,
2714         ide_transfer_stop,
2715         ide_atapi_cmd_reply_end,
2716         ide_atapi_cmd,
2717         ide_dummy_transfer_stop,
2718 };
2719 
2720 static int transfer_end_table_idx(EndTransferFunc *fn)
2721 {
2722     int i;
2723 
2724     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2725         if (transfer_end_table[i] == fn)
2726             return i;
2727 
2728     return -1;
2729 }
2730 
2731 static int ide_drive_post_load(void *opaque, int version_id)
2732 {
2733     IDEState *s = opaque;
2734 
2735     if (s->blk && s->identify_set) {
2736         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2737     }
2738     return 0;
2739 }
2740 
2741 static int ide_drive_pio_post_load(void *opaque, int version_id)
2742 {
2743     IDEState *s = opaque;
2744 
2745     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2746         return -EINVAL;
2747     }
2748     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2749     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2750     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2751     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2752 
2753     return 0;
2754 }
2755 
2756 static int ide_drive_pio_pre_save(void *opaque)
2757 {
2758     IDEState *s = opaque;
2759     int idx;
2760 
2761     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2762     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2763 
2764     idx = transfer_end_table_idx(s->end_transfer_func);
2765     if (idx == -1) {
2766         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2767                         __func__);
2768         s->end_transfer_fn_idx = 2; /* ide_transfer_stop */
2769     } else {
2770         s->end_transfer_fn_idx = idx;
2771     }
2772 
2773     return 0;
2774 }
2775 
2776 static bool ide_drive_pio_state_needed(void *opaque)
2777 {
2778     IDEState *s = opaque;
2779 
2780     return ((s->status & DRQ_STAT) != 0)
2781         || (s->bus->error_status & IDE_RETRY_PIO);
2782 }
2783 
2784 static bool ide_tray_state_needed(void *opaque)
2785 {
2786     IDEState *s = opaque;
2787 
2788     return s->tray_open || s->tray_locked;
2789 }
2790 
2791 static bool ide_atapi_gesn_needed(void *opaque)
2792 {
2793     IDEState *s = opaque;
2794 
2795     return s->events.new_media || s->events.eject_request;
2796 }
2797 
2798 static bool ide_error_needed(void *opaque)
2799 {
2800     IDEBus *bus = opaque;
2801 
2802     return (bus->error_status != 0);
2803 }
2804 
2805 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2806 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2807     .name = "ide_drive/atapi/gesn_state",
2808     .version_id = 1,
2809     .minimum_version_id = 1,
2810     .needed = ide_atapi_gesn_needed,
2811     .fields = (VMStateField[]) {
2812         VMSTATE_BOOL(events.new_media, IDEState),
2813         VMSTATE_BOOL(events.eject_request, IDEState),
2814         VMSTATE_END_OF_LIST()
2815     }
2816 };
2817 
2818 static const VMStateDescription vmstate_ide_tray_state = {
2819     .name = "ide_drive/tray_state",
2820     .version_id = 1,
2821     .minimum_version_id = 1,
2822     .needed = ide_tray_state_needed,
2823     .fields = (VMStateField[]) {
2824         VMSTATE_BOOL(tray_open, IDEState),
2825         VMSTATE_BOOL(tray_locked, IDEState),
2826         VMSTATE_END_OF_LIST()
2827     }
2828 };
2829 
2830 static const VMStateDescription vmstate_ide_drive_pio_state = {
2831     .name = "ide_drive/pio_state",
2832     .version_id = 1,
2833     .minimum_version_id = 1,
2834     .pre_save = ide_drive_pio_pre_save,
2835     .post_load = ide_drive_pio_post_load,
2836     .needed = ide_drive_pio_state_needed,
2837     .fields = (VMStateField[]) {
2838         VMSTATE_INT32(req_nb_sectors, IDEState),
2839         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2840                              vmstate_info_uint8, uint8_t),
2841         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2842         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2843         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2844         VMSTATE_INT32(elementary_transfer_size, IDEState),
2845         VMSTATE_INT32(packet_transfer_size, IDEState),
2846         VMSTATE_END_OF_LIST()
2847     }
2848 };
2849 
2850 const VMStateDescription vmstate_ide_drive = {
2851     .name = "ide_drive",
2852     .version_id = 3,
2853     .minimum_version_id = 0,
2854     .post_load = ide_drive_post_load,
2855     .fields = (VMStateField[]) {
2856         VMSTATE_INT32(mult_sectors, IDEState),
2857         VMSTATE_INT32(identify_set, IDEState),
2858         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2859         VMSTATE_UINT8(feature, IDEState),
2860         VMSTATE_UINT8(error, IDEState),
2861         VMSTATE_UINT32(nsector, IDEState),
2862         VMSTATE_UINT8(sector, IDEState),
2863         VMSTATE_UINT8(lcyl, IDEState),
2864         VMSTATE_UINT8(hcyl, IDEState),
2865         VMSTATE_UINT8(hob_feature, IDEState),
2866         VMSTATE_UINT8(hob_sector, IDEState),
2867         VMSTATE_UINT8(hob_nsector, IDEState),
2868         VMSTATE_UINT8(hob_lcyl, IDEState),
2869         VMSTATE_UINT8(hob_hcyl, IDEState),
2870         VMSTATE_UINT8(select, IDEState),
2871         VMSTATE_UINT8(status, IDEState),
2872         VMSTATE_UINT8(lba48, IDEState),
2873         VMSTATE_UINT8(sense_key, IDEState),
2874         VMSTATE_UINT8(asc, IDEState),
2875         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2876         VMSTATE_END_OF_LIST()
2877     },
2878     .subsections = (const VMStateDescription*[]) {
2879         &vmstate_ide_drive_pio_state,
2880         &vmstate_ide_tray_state,
2881         &vmstate_ide_atapi_gesn_state,
2882         NULL
2883     }
2884 };
2885 
2886 static const VMStateDescription vmstate_ide_error_status = {
2887     .name = "ide_bus/error",
2888     .version_id = 2,
2889     .minimum_version_id = 1,
2890     .needed = ide_error_needed,
2891     .fields = (VMStateField[]) {
2892         VMSTATE_INT32(error_status, IDEBus),
2893         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2894         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2895         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2896         VMSTATE_END_OF_LIST()
2897     }
2898 };
2899 
2900 const VMStateDescription vmstate_ide_bus = {
2901     .name = "ide_bus",
2902     .version_id = 1,
2903     .minimum_version_id = 1,
2904     .fields = (VMStateField[]) {
2905         VMSTATE_UINT8(cmd, IDEBus),
2906         VMSTATE_UINT8(unit, IDEBus),
2907         VMSTATE_END_OF_LIST()
2908     },
2909     .subsections = (const VMStateDescription*[]) {
2910         &vmstate_ide_error_status,
2911         NULL
2912     }
2913 };
2914 
2915 void ide_drive_get(DriveInfo **hd, int n)
2916 {
2917     int i;
2918 
2919     for (i = 0; i < n; i++) {
2920         hd[i] = drive_get_by_index(IF_IDE, i);
2921     }
2922 }
2923