xref: /openbmc/qemu/hw/ide/core.c (revision d84be02d)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/pci/pci.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 #include "trace.h"
40 
41 /* These values were based on a Seagate ST3500418AS but have been modified
42    to make more sense in QEMU */
43 static const int smart_attributes[][12] = {
44     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
45     /* raw read error rate */
46     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
47     /* spin up */
48     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
49     /* start stop count */
50     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
51     /* remapped sectors */
52     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
53     /* power on hours */
54     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55     /* power cycle count */
56     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
57     /* airflow-temperature-celsius */
58     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
59 };
60 
61 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
62     [IDE_DMA_READ] = "DMA READ",
63     [IDE_DMA_WRITE] = "DMA WRITE",
64     [IDE_DMA_TRIM] = "DMA TRIM",
65     [IDE_DMA_ATAPI] = "DMA ATAPI"
66 };
67 
68 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
69 {
70     if ((unsigned)enval < IDE_DMA__COUNT) {
71         return IDE_DMA_CMD_lookup[enval];
72     }
73     return "DMA UNKNOWN CMD";
74 }
75 
76 static void ide_dummy_transfer_stop(IDEState *s);
77 
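/*
 * Copy @src into @str, space-padded to @len bytes.  The destination index
 * is XOR-ed with 1 to swap each pair of bytes: ATA IDENTIFY string fields
 * are arrays of 16-bit words with the earlier character in the high byte,
 * and the identify buffer is read out as little-endian words.
 */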
78 static void padstr(char *str, const char *src, int len)
79 {
80     int i, v;
81     for(i = 0; i < len; i++) {
82         if (*src)
83             v = *src++;
84         else
85             v = ' ';
86         str[i^1] = v;
87     }
88 }
89 
90 static void put_le16(uint16_t *p, unsigned int v)
91 {
92     *p = cpu_to_le16(v);
93 }
94 
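/*
 * Refresh the capacity words of the cached IDENTIFY data: words 60-61
 * hold the 28-bit LBA sector count and words 100-103 the 48-bit count.
 */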
95 static void ide_identify_size(IDEState *s)
96 {
97     uint16_t *p = (uint16_t *)s->identify_data;
98     put_le16(p + 60, s->nb_sectors);
99     put_le16(p + 61, s->nb_sectors >> 16);
100     put_le16(p + 100, s->nb_sectors);
101     put_le16(p + 101, s->nb_sectors >> 16);
102     put_le16(p + 102, s->nb_sectors >> 32);
103     put_le16(p + 103, s->nb_sectors >> 48);
104 }
105 
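/*
 * Build the IDENTIFY DEVICE response for a hard disk.  The result is
 * cached in s->identify_data after the first call; subsequent calls only
 * copy it into the PIO transfer buffer.
 */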
106 static void ide_identify(IDEState *s)
107 {
108     uint16_t *p;
109     unsigned int oldsize;
110     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
111 
112     p = (uint16_t *)s->identify_data;
113     if (s->identify_set) {
114         goto fill_buffer;
115     }
116     memset(p, 0, sizeof(s->identify_data));
117 
118     put_le16(p + 0, 0x0040);
119     put_le16(p + 1, s->cylinders);
120     put_le16(p + 3, s->heads);
121     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
122     put_le16(p + 5, 512); /* XXX: retired, remove ? */
123     put_le16(p + 6, s->sectors);
124     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
125     put_le16(p + 20, 3); /* XXX: retired, remove ? */
126     put_le16(p + 21, 512); /* cache size in sectors */
127     put_le16(p + 22, 4); /* ecc bytes */
128     padstr((char *)(p + 23), s->version, 8); /* firmware version */
129     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
130 #if MAX_MULT_SECTORS > 1
131     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
132 #endif
133     put_le16(p + 48, 1); /* dword I/O */
134     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
135     put_le16(p + 51, 0x200); /* PIO transfer cycle */
136     put_le16(p + 52, 0x200); /* DMA transfer cycle */
137     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
138     put_le16(p + 54, s->cylinders);
139     put_le16(p + 55, s->heads);
140     put_le16(p + 56, s->sectors);
141     oldsize = s->cylinders * s->heads * s->sectors;
142     put_le16(p + 57, oldsize);
143     put_le16(p + 58, oldsize >> 16);
144     if (s->mult_sectors)
145         put_le16(p + 59, 0x100 | s->mult_sectors);
146     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
147     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
148     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
149     put_le16(p + 63, 0x07); /* mdma0-2 supported */
150     put_le16(p + 64, 0x03); /* pio3-4 supported */
151     put_le16(p + 65, 120);
152     put_le16(p + 66, 120);
153     put_le16(p + 67, 120);
154     put_le16(p + 68, 120);
155     if (dev && dev->conf.discard_granularity) {
156         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
157     }
158 
159     if (s->ncq_queues) {
160         put_le16(p + 75, s->ncq_queues - 1);
161         /* NCQ supported */
162         put_le16(p + 76, (1 << 8));
163     }
164 
165     put_le16(p + 80, 0xf0); /* ata4 -> ata7 supported */
166     put_le16(p + 81, 0x16); /* conforms to ata5 */
167     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
168     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
169     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
170     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
171     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
172     if (s->wwn) {
173         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
174     } else {
175         put_le16(p + 84, (1 << 14) | 0);
176     }
177     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
178     if (blk_enable_write_cache(s->blk)) {
179         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
180     } else {
181         put_le16(p + 85, (1 << 14) | 1);
182     }
183     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
184     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
185     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
186     if (s->wwn) {
187         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
188     } else {
189         put_le16(p + 87, (1 << 14) | 0);
190     }
191     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
192     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
193     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
194     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
195     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
196     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
197 
198     if (dev && dev->conf.physical_block_size)
199         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
200     if (s->wwn) {
201         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
202         put_le16(p + 108, s->wwn >> 48);
203         put_le16(p + 109, s->wwn >> 32);
204         put_le16(p + 110, s->wwn >> 16);
205         put_le16(p + 111, s->wwn);
206     }
207     if (dev && dev->conf.discard_granularity) {
208         put_le16(p + 169, 1); /* TRIM support */
209     }
210     if (dev) {
211         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
212     }
213 
214     ide_identify_size(s);
215     s->identify_set = 1;
216 
217 fill_buffer:
218     memcpy(s->io_buffer, p, sizeof(s->identify_data));
219 }
220 
221 static void ide_atapi_identify(IDEState *s)
222 {
223     uint16_t *p;
224 
225     p = (uint16_t *)s->identify_data;
226     if (s->identify_set) {
227         goto fill_buffer;
228     }
229     memset(p, 0, sizeof(s->identify_data));
230 
231     /* Removable CDROM, 50us response, 12 byte packets */
232     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
233     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
234     put_le16(p + 20, 3); /* buffer type */
235     put_le16(p + 21, 512); /* cache size in sectors */
236     put_le16(p + 22, 4); /* ecc bytes */
237     padstr((char *)(p + 23), s->version, 8); /* firmware version */
238     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
239     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
240 #ifdef USE_DMA_CDROM
241     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
242     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
243     put_le16(p + 62, 7);  /* single word dma0-2 supported */
244     put_le16(p + 63, 7);  /* mdma0-2 supported */
245 #else
246     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
247     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
248     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
249 #endif
250     put_le16(p + 64, 3); /* pio3-4 supported */
251     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
252     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
253     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
254     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
255 
256     put_le16(p + 71, 30); /* in ns */
257     put_le16(p + 72, 30); /* in ns */
258 
259     if (s->ncq_queues) {
260         put_le16(p + 75, s->ncq_queues - 1);
261         /* NCQ supported */
262         put_le16(p + 76, (1 << 8));
263     }
264 
265     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
266     if (s->wwn) {
267         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
268         put_le16(p + 87, (1 << 8)); /* WWN enabled */
269     }
270 
271 #ifdef USE_DMA_CDROM
272     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
273 #endif
274 
275     if (s->wwn) {
276         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
277         put_le16(p + 108, s->wwn >> 48);
278         put_le16(p + 109, s->wwn >> 32);
279         put_le16(p + 110, s->wwn >> 16);
280         put_le16(p + 111, s->wwn);
281     }
282 
283     s->identify_set = 1;
284 
285 fill_buffer:
286     memcpy(s->io_buffer, p, sizeof(s->identify_data));
287 }
288 
289 static void ide_cfata_identify_size(IDEState *s)
290 {
291     uint16_t *p = (uint16_t *)s->identify_data;
292     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
293     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
294     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
295     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
296 }
297 
298 static void ide_cfata_identify(IDEState *s)
299 {
300     uint16_t *p;
301     uint32_t cur_sec;
302 
303     p = (uint16_t *)s->identify_data;
304     if (s->identify_set) {
305         goto fill_buffer;
306     }
307     memset(p, 0, sizeof(s->identify_data));
308 
309     cur_sec = s->cylinders * s->heads * s->sectors;
310 
311     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
312     put_le16(p + 1, s->cylinders);		/* Default cylinders */
313     put_le16(p + 3, s->heads);			/* Default heads */
314     put_le16(p + 6, s->sectors);		/* Default sectors per track */
315     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
316     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
317     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
318     put_le16(p + 22, 0x0004);			/* ECC bytes */
319     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
320     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
321 #if MAX_MULT_SECTORS > 1
322     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
323 #else
324     put_le16(p + 47, 0x0000);
325 #endif
326     put_le16(p + 49, 0x0f00);			/* Capabilities */
327     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
328     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
329     put_le16(p + 53, 0x0003);			/* Translation params valid */
330     put_le16(p + 54, s->cylinders);		/* Current cylinders */
331     put_le16(p + 55, s->heads);			/* Current heads */
332     put_le16(p + 56, s->sectors);		/* Current sectors */
333     put_le16(p + 57, cur_sec);			/* Current capacity */
334     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
335     if (s->mult_sectors)			/* Multiple sector setting */
336         put_le16(p + 59, 0x100 | s->mult_sectors);
337     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
338     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
339     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
340     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
341     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
342     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
343     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
344     put_le16(p + 82, 0x400c);			/* Command Set supported */
345     put_le16(p + 83, 0x7068);			/* Command Set supported */
346     put_le16(p + 84, 0x4000);			/* Features supported */
347     put_le16(p + 85, 0x000c);			/* Command Set enabled */
348     put_le16(p + 86, 0x7044);			/* Command Set enabled */
349     put_le16(p + 87, 0x4000);			/* Features enabled */
350     put_le16(p + 91, 0x4060);			/* Current APM level */
351     put_le16(p + 129, 0x0002);			/* Current features option */
352     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
353     put_le16(p + 131, 0x0001);			/* Initial power mode */
354     put_le16(p + 132, 0x0000);			/* User signature */
355     put_le16(p + 160, 0x8100);			/* Power requirement */
356     put_le16(p + 161, 0x8001);			/* CF command set */
357 
358     ide_cfata_identify_size(s);
359     s->identify_set = 1;
360 
361 fill_buffer:
362     memcpy(s->io_buffer, p, sizeof(s->identify_data));
363 }
364 
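/*
 * Load the device signature into the task-file registers: lcyl/hcyl are
 * 0x14/0xEB for an ATAPI device, 0x00/0x00 for an ATA disk with a backend
 * and 0xFF/0xFF when nothing is attached, so the guest can tell what kind
 * of device answered.
 */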
365 static void ide_set_signature(IDEState *s)
366 {
367     s->select &= 0xf0; /* clear head */
368     /* put signature */
369     s->nsector = 1;
370     s->sector = 1;
371     if (s->drive_kind == IDE_CD) {
372         s->lcyl = 0x14;
373         s->hcyl = 0xeb;
374     } else if (s->blk) {
375         s->lcyl = 0;
376         s->hcyl = 0;
377     } else {
378         s->lcyl = 0xff;
379         s->hcyl = 0xff;
380     }
381 }
382 
383 typedef struct TrimAIOCB {
384     BlockAIOCB common;
385     BlockBackend *blk;
386     QEMUBH *bh;
387     int ret;
388     QEMUIOVector *qiov;
389     BlockAIOCB *aiocb;
390     int i, j;
391 } TrimAIOCB;
392 
393 static void trim_aio_cancel(BlockAIOCB *acb)
394 {
395     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
396 
397     /* Exit the loop so ide_issue_trim_cb will not continue  */
398     iocb->j = iocb->qiov->niov - 1;
399     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
400 
401     iocb->ret = -ECANCELED;
402 
403     if (iocb->aiocb) {
404         blk_aio_cancel_async(iocb->aiocb);
405         iocb->aiocb = NULL;
406     }
407 }
408 
409 static const AIOCBInfo trim_aiocb_info = {
410     .aiocb_size         = sizeof(TrimAIOCB),
411     .cancel_async       = trim_aio_cancel,
412 };
413 
414 static void ide_trim_bh_cb(void *opaque)
415 {
416     TrimAIOCB *iocb = opaque;
417 
418     iocb->common.cb(iocb->common.opaque, iocb->ret);
419 
420     qemu_bh_delete(iocb->bh);
421     iocb->bh = NULL;
422     qemu_aio_unref(iocb);
423 }
424 
425 static void ide_issue_trim_cb(void *opaque, int ret)
426 {
427     TrimAIOCB *iocb = opaque;
428     if (ret >= 0) {
429         while (iocb->j < iocb->qiov->niov) {
430             int j = iocb->j;
431             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
432                 int i = iocb->i;
433                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
434 
435                 /* 6-byte LBA + 2-byte range per entry */
436                 uint64_t entry = le64_to_cpu(buffer[i]);
437                 uint64_t sector = entry & 0x0000ffffffffffffULL;
438                 uint16_t count = entry >> 48;
439 
440                 if (count == 0) {
441                     continue;
442                 }
443 
444                 /* Got an entry! Submit and exit.  */
445                 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
446                                                sector << BDRV_SECTOR_BITS,
447                                                count << BDRV_SECTOR_BITS,
448                                                ide_issue_trim_cb, opaque);
449                 return;
450             }
451 
452             iocb->j++;
453             iocb->i = -1;
454         }
455     } else {
456         iocb->ret = ret;
457     }
458 
459     iocb->aiocb = NULL;
460     if (iocb->bh) {
461         qemu_bh_schedule(iocb->bh);
462     }
463 }
464 
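/*
 * Issue the discards described by a DSM TRIM payload.  The guest supplies
 * a list of 8-byte entries (48-bit starting LBA plus 16-bit sector count)
 * spread across @qiov; ide_issue_trim_cb() walks them with the i/j cursors
 * in TrimAIOCB, submitting one blk_aio_pdiscard() per non-empty range and
 * re-entering itself on completion until every entry has been consumed,
 * at which point the bottom half delivers the final status.
 */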
465 BlockAIOCB *ide_issue_trim(
466         int64_t offset, QEMUIOVector *qiov,
467         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
468 {
469     BlockBackend *blk = opaque;
470     TrimAIOCB *iocb;
471 
472     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
473     iocb->blk = blk;
474     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
475     iocb->ret = 0;
476     iocb->qiov = qiov;
477     iocb->i = -1;
478     iocb->j = 0;
479     ide_issue_trim_cb(iocb, 0);
480     return &iocb->common;
481 }
482 
483 void ide_abort_command(IDEState *s)
484 {
485     ide_transfer_stop(s);
486     s->status = READY_STAT | ERR_STAT;
487     s->error = ABRT_ERR;
488 }
489 
490 static void ide_set_retry(IDEState *s)
491 {
492     s->bus->retry_unit = s->unit;
493     s->bus->retry_sector_num = ide_get_sector(s);
494     s->bus->retry_nsector = s->nsector;
495 }
496 
497 static void ide_clear_retry(IDEState *s)
498 {
499     s->bus->retry_unit = -1;
500     s->bus->retry_sector_num = 0;
501     s->bus->retry_nsector = 0;
502 }
503 
504 /* prepare data transfer and tell what to do after */
505 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
506                         EndTransferFunc *end_transfer_func)
507 {
508     s->end_transfer_func = end_transfer_func;
509     s->data_ptr = buf;
510     s->data_end = buf + size;
511     ide_set_retry(s);
512     if (!(s->status & ERR_STAT)) {
513         s->status |= DRQ_STAT;
514     }
515     if (s->bus->dma->ops->start_transfer) {
516         s->bus->dma->ops->start_transfer(s->bus->dma);
517     }
518 }
519 
520 static void ide_cmd_done(IDEState *s)
521 {
522     if (s->bus->dma->ops->cmd_done) {
523         s->bus->dma->ops->cmd_done(s->bus->dma);
524     }
525 }
526 
527 static void ide_transfer_halt(IDEState *s,
528                               void(*end_transfer_func)(IDEState *),
529                               bool notify)
530 {
531     s->end_transfer_func = end_transfer_func;
532     s->data_ptr = s->io_buffer;
533     s->data_end = s->io_buffer;
534     s->status &= ~DRQ_STAT;
535     if (notify) {
536         ide_cmd_done(s);
537     }
538 }
539 
540 void ide_transfer_stop(IDEState *s)
541 {
542     ide_transfer_halt(s, ide_transfer_stop, true);
543 }
544 
545 static void ide_transfer_cancel(IDEState *s)
546 {
547     ide_transfer_halt(s, ide_transfer_cancel, false);
548 }
549 
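/*
 * Return the current position from the task-file registers.  Bit 6 of the
 * select register chooses LBA mode (28- or 48-bit depending on s->lba48);
 * otherwise the CHS values are converted with
 * LBA = (cyl * heads + head) * sectors + (sector - 1).
 */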
550 int64_t ide_get_sector(IDEState *s)
551 {
552     int64_t sector_num;
553     if (s->select & 0x40) {
554         /* lba */
555         if (!s->lba48) {
556             sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
557                 (s->lcyl << 8) | s->sector;
558         } else {
559             sector_num = ((int64_t)s->hob_hcyl << 40) |
560                 ((int64_t)s->hob_lcyl << 32) |
561                 ((int64_t)s->hob_sector << 24) |
562                 ((int64_t)s->hcyl << 16) |
563                 ((int64_t)s->lcyl << 8) | s->sector;
564         }
565     } else {
566         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
567             (s->select & 0x0f) * s->sectors + (s->sector - 1);
568     }
569     return sector_num;
570 }
571 
572 void ide_set_sector(IDEState *s, int64_t sector_num)
573 {
574     unsigned int cyl, r;
575     if (s->select & 0x40) {
576         if (!s->lba48) {
577             s->select = (s->select & 0xf0) | (sector_num >> 24);
578             s->hcyl = (sector_num >> 16);
579             s->lcyl = (sector_num >> 8);
580             s->sector = (sector_num);
581         } else {
582             s->sector = sector_num;
583             s->lcyl = sector_num >> 8;
584             s->hcyl = sector_num >> 16;
585             s->hob_sector = sector_num >> 24;
586             s->hob_lcyl = sector_num >> 32;
587             s->hob_hcyl = sector_num >> 40;
588         }
589     } else {
590         cyl = sector_num / (s->heads * s->sectors);
591         r = sector_num % (s->heads * s->sectors);
592         s->hcyl = cyl >> 8;
593         s->lcyl = cyl;
594         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
595         s->sector = (r % s->sectors) + 1;
596     }
597 }
598 
599 static void ide_rw_error(IDEState *s) {
600     ide_abort_command(s);
601     ide_set_irq(s->bus);
602 }
603 
604 static bool ide_sect_range_ok(IDEState *s,
605                               uint64_t sector, uint64_t nb_sectors)
606 {
607     uint64_t total_sectors;
608 
609     blk_get_geometry(s->blk, &total_sectors);
610     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
611         return false;
612     }
613     return true;
614 }
615 
616 static void ide_buffered_readv_cb(void *opaque, int ret)
617 {
618     IDEBufferedRequest *req = opaque;
619     if (!req->orphaned) {
620         if (!ret) {
621             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
622                                 req->original_qiov->size);
623         }
624         req->original_cb(req->original_opaque, ret);
625     }
626     QLIST_REMOVE(req, list);
627     qemu_vfree(req->iov.iov_base);
628     g_free(req);
629 }
630 
631 #define MAX_BUFFERED_REQS 16
632 
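/*
 * Read into a private bounce buffer instead of the caller's iovec.  If the
 * request is later orphaned by ide_cancel_dma_sync(), the completion
 * callback only frees the bounce buffer rather than copying data into
 * memory the guest may already have reused.  Requests beyond
 * MAX_BUFFERED_REQS outstanding are rejected with -EIO.
 */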
633 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
634                                QEMUIOVector *iov, int nb_sectors,
635                                BlockCompletionFunc *cb, void *opaque)
636 {
637     BlockAIOCB *aioreq;
638     IDEBufferedRequest *req;
639     int c = 0;
640 
641     QLIST_FOREACH(req, &s->buffered_requests, list) {
642         c++;
643     }
644     if (c > MAX_BUFFERED_REQS) {
645         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
646     }
647 
648     req = g_new0(IDEBufferedRequest, 1);
649     req->original_qiov = iov;
650     req->original_cb = cb;
651     req->original_opaque = opaque;
652     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
653     req->iov.iov_len = iov->size;
654     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
655 
656     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
657                             &req->qiov, 0, ide_buffered_readv_cb, req);
658 
659     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
660     return aioreq;
661 }
662 
663 /**
664  * Cancel all pending DMA requests.
665  * Any buffered DMA requests are instantly canceled,
666  * but any pending unbuffered DMA requests must be waited on.
667  */
668 void ide_cancel_dma_sync(IDEState *s)
669 {
670     IDEBufferedRequest *req;
671 
672     /* First invoke the callbacks of all buffered requests
673      * and flag those requests as orphaned. Ideally there
674      * are no unbuffered (scatter-gather DMA or write)
675      * requests pending and we can avoid having to drain. */
676     QLIST_FOREACH(req, &s->buffered_requests, list) {
677         if (!req->orphaned) {
678             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
679             req->original_cb(req->original_opaque, -ECANCELED);
680         }
681         req->orphaned = true;
682     }
683 
684     /*
685      * We can't cancel Scatter Gather DMA in the middle of the
686      * operation or a partial (not full) DMA transfer would reach
687      * the storage, so we wait for completion instead (we behave
688      * as if the DMA had already completed by the time the guest
689      * tried to cancel it with bmdma_cmd_writeb with BM_CMD_START
690      * not set).
691      *
692      * In the future we'll be able to safely cancel the I/O if the
693      * whole DMA operation is submitted to disk with a single
694      * aio operation with preadv/pwritev.
695      */
696     if (s->bus->dma->aiocb) {
697         trace_ide_cancel_dma_sync_remaining();
698         blk_drain(s->blk);
699         assert(s->bus->dma->aiocb == NULL);
700     }
701 }
702 
703 static void ide_sector_read(IDEState *s);
704 
705 static void ide_sector_read_cb(void *opaque, int ret)
706 {
707     IDEState *s = opaque;
708     int n;
709 
710     s->pio_aiocb = NULL;
711     s->status &= ~BUSY_STAT;
712 
713     if (ret == -ECANCELED) {
714         return;
715     }
716     if (ret != 0) {
717         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
718                                 IDE_RETRY_READ)) {
719             return;
720         }
721     }
722 
723     block_acct_done(blk_get_stats(s->blk), &s->acct);
724 
725     n = s->nsector;
726     if (n > s->req_nb_sectors) {
727         n = s->req_nb_sectors;
728     }
729 
730     ide_set_sector(s, ide_get_sector(s) + n);
731     s->nsector -= n;
732     /* Allow the guest to read the io_buffer */
733     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
734     ide_set_irq(s->bus);
735 }
736 
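/*
 * PIO read path: read up to req_nb_sectors into io_buffer and let the
 * guest drain it through the data register; ide_sector_read_cb() installs
 * this function as the end-of-transfer handler so it is re-entered for the
 * next chunk until nsector reaches zero.
 */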
737 static void ide_sector_read(IDEState *s)
738 {
739     int64_t sector_num;
740     int n;
741 
742     s->status = READY_STAT | SEEK_STAT;
743     s->error = 0; /* not needed by IDE spec, but needed by Windows */
744     sector_num = ide_get_sector(s);
745     n = s->nsector;
746 
747     if (n == 0) {
748         ide_transfer_stop(s);
749         return;
750     }
751 
752     s->status |= BUSY_STAT;
753 
754     if (n > s->req_nb_sectors) {
755         n = s->req_nb_sectors;
756     }
757 
758     trace_ide_sector_read(sector_num, n);
759 
760     if (!ide_sect_range_ok(s, sector_num, n)) {
761         ide_rw_error(s);
762         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
763         return;
764     }
765 
766     s->iov.iov_base = s->io_buffer;
767     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
768     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
769 
770     block_acct_start(blk_get_stats(s->blk), &s->acct,
771                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
772     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
773                                       ide_sector_read_cb, s);
774 }
775 
776 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
777 {
778     if (s->bus->dma->ops->commit_buf) {
779         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
780     }
781     s->io_buffer_offset += tx_bytes;
782     qemu_sglist_destroy(&s->sg);
783 }
784 
785 void ide_set_inactive(IDEState *s, bool more)
786 {
787     s->bus->dma->aiocb = NULL;
788     ide_clear_retry(s);
789     if (s->bus->dma->ops->set_inactive) {
790         s->bus->dma->ops->set_inactive(s->bus->dma, more);
791     }
792     ide_cmd_done(s);
793 }
794 
795 void ide_dma_error(IDEState *s)
796 {
797     dma_buf_commit(s, 0);
798     ide_abort_command(s);
799     ide_set_inactive(s, false);
800     ide_set_irq(s->bus);
801 }
802 
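/*
 * Apply the block device's error policy to a failed request.  Returns
 * non-zero if the error was reported to the guest or the VM was stopped
 * for a later retry (the caller must not complete the command), and zero
 * if the policy is to ignore the error and carry on as if it succeeded.
 */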
803 int ide_handle_rw_error(IDEState *s, int error, int op)
804 {
805     bool is_read = (op & IDE_RETRY_READ) != 0;
806     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
807 
808     if (action == BLOCK_ERROR_ACTION_STOP) {
809         assert(s->bus->retry_unit == s->unit);
810         s->bus->error_status = op;
811     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
812         block_acct_failed(blk_get_stats(s->blk), &s->acct);
813         if (IS_IDE_RETRY_DMA(op)) {
814             ide_dma_error(s);
815         } else if (IS_IDE_RETRY_ATAPI(op)) {
816             ide_atapi_io_error(s, -error);
817         } else {
818             ide_rw_error(s);
819         }
820     }
821     blk_error_action(s->blk, action, is_read, error);
822     return action != BLOCK_ERROR_ACTION_IGNORE;
823 }
824 
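/*
 * Completion callback driving the ATA DMA state machine: commit the bytes
 * described by the scatter/gather list, advance the current sector and
 * remaining count, then either re-arm the next chunk (rebuilding the
 * sglist from the PRD table via prepare_buf) or finish the command and
 * raise the completion interrupt.
 */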
825 static void ide_dma_cb(void *opaque, int ret)
826 {
827     IDEState *s = opaque;
828     int n;
829     int64_t sector_num;
830     uint64_t offset;
831     bool stay_active = false;
832 
833     if (ret == -ECANCELED) {
834         return;
835     }
836     if (ret < 0) {
837         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
838             s->bus->dma->aiocb = NULL;
839             dma_buf_commit(s, 0);
840             return;
841         }
842     }
843 
844     n = s->io_buffer_size >> 9;
845     if (n > s->nsector) {
846         /* The PRDs were longer than needed for this request. Shorten them so
847          * we don't get a negative remainder. The Active bit must remain set
848          * after the request completes. */
849         n = s->nsector;
850         stay_active = true;
851     }
852 
853     sector_num = ide_get_sector(s);
854     if (n > 0) {
855         assert(n * 512 == s->sg.size);
856         dma_buf_commit(s, s->sg.size);
857         sector_num += n;
858         ide_set_sector(s, sector_num);
859         s->nsector -= n;
860     }
861 
862     /* end of transfer ? */
863     if (s->nsector == 0) {
864         s->status = READY_STAT | SEEK_STAT;
865         ide_set_irq(s->bus);
866         goto eot;
867     }
868 
869     /* launch next transfer */
870     n = s->nsector;
871     s->io_buffer_index = 0;
872     s->io_buffer_size = n * 512;
873     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
874         /* The PRDs were too short. Reset the Active bit, but don't raise an
875          * interrupt. */
876         s->status = READY_STAT | SEEK_STAT;
877         dma_buf_commit(s, 0);
878         goto eot;
879     }
880 
881     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
882 
883     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
884         !ide_sect_range_ok(s, sector_num, n)) {
885         ide_dma_error(s);
886         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
887         return;
888     }
889 
890     offset = sector_num << BDRV_SECTOR_BITS;
891     switch (s->dma_cmd) {
892     case IDE_DMA_READ:
893         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
894                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
895         break;
896     case IDE_DMA_WRITE:
897         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
898                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
899         break;
900     case IDE_DMA_TRIM:
901         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
902                                         &s->sg, offset, BDRV_SECTOR_SIZE,
903                                         ide_issue_trim, s->blk, ide_dma_cb, s,
904                                         DMA_DIRECTION_TO_DEVICE);
905         break;
906     default:
907         abort();
908     }
909     return;
910 
911 eot:
912     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
913         block_acct_done(blk_get_stats(s->blk), &s->acct);
914     }
915     ide_set_inactive(s, stay_active);
916 }
917 
918 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
919 {
920     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
921     s->io_buffer_size = 0;
922     s->dma_cmd = dma_cmd;
923 
924     switch (dma_cmd) {
925     case IDE_DMA_READ:
926         block_acct_start(blk_get_stats(s->blk), &s->acct,
927                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
928         break;
929     case IDE_DMA_WRITE:
930         block_acct_start(blk_get_stats(s->blk), &s->acct,
931                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
932         break;
933     default:
934         break;
935     }
936 
937     ide_start_dma(s, ide_dma_cb);
938 }
939 
940 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
941 {
942     s->io_buffer_index = 0;
943     ide_set_retry(s);
944     if (s->bus->dma->ops->start_dma) {
945         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
946     }
947 }
948 
949 static void ide_sector_write(IDEState *s);
950 
951 static void ide_sector_write_timer_cb(void *opaque)
952 {
953     IDEState *s = opaque;
954     ide_set_irq(s->bus);
955 }
956 
957 static void ide_sector_write_cb(void *opaque, int ret)
958 {
959     IDEState *s = opaque;
960     int n;
961 
962     if (ret == -ECANCELED) {
963         return;
964     }
965 
966     s->pio_aiocb = NULL;
967     s->status &= ~BUSY_STAT;
968 
969     if (ret != 0) {
970         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
971             return;
972         }
973     }
974 
975     block_acct_done(blk_get_stats(s->blk), &s->acct);
976 
977     n = s->nsector;
978     if (n > s->req_nb_sectors) {
979         n = s->req_nb_sectors;
980     }
981     s->nsector -= n;
982 
983     ide_set_sector(s, ide_get_sector(s) + n);
984     if (s->nsector == 0) {
985         /* no more sectors to write */
986         ide_transfer_stop(s);
987     } else {
988         int n1 = s->nsector;
989         if (n1 > s->req_nb_sectors) {
990             n1 = s->req_nb_sectors;
991         }
992         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
993                            ide_sector_write);
994     }
995 
996     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
997         /* It seems there is a bug in the Windows 2000 installer HDD
998            IDE driver which fills the disk with empty logs when the
999            IDE write IRQ comes too early. This hack tries to correct
1000            that at the expense of slower write performance. Use this
1001            option _only_ to install Windows 2000. You must disable it
1002            for normal use. */
1003         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1004                   (NANOSECONDS_PER_SECOND / 1000));
1005     } else {
1006         ide_set_irq(s->bus);
1007     }
1008 }
1009 
1010 static void ide_sector_write(IDEState *s)
1011 {
1012     int64_t sector_num;
1013     int n;
1014 
1015     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1016     sector_num = ide_get_sector(s);
1017 
1018     n = s->nsector;
1019     if (n > s->req_nb_sectors) {
1020         n = s->req_nb_sectors;
1021     }
1022 
1023     trace_ide_sector_write(sector_num, n);
1024 
1025     if (!ide_sect_range_ok(s, sector_num, n)) {
1026         ide_rw_error(s);
1027         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1028         return;
1029     }
1030 
1031     s->iov.iov_base = s->io_buffer;
1032     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1033     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1034 
1035     block_acct_start(blk_get_stats(s->blk), &s->acct,
1036                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1037     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1038                                    &s->qiov, 0, ide_sector_write_cb, s);
1039 }
1040 
1041 static void ide_flush_cb(void *opaque, int ret)
1042 {
1043     IDEState *s = opaque;
1044 
1045     s->pio_aiocb = NULL;
1046 
1047     if (ret == -ECANCELED) {
1048         return;
1049     }
1050     if (ret < 0) {
1051         /* XXX: What sector number to set here? */
1052         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1053             return;
1054         }
1055     }
1056 
1057     if (s->blk) {
1058         block_acct_done(blk_get_stats(s->blk), &s->acct);
1059     }
1060     s->status = READY_STAT | SEEK_STAT;
1061     ide_cmd_done(s);
1062     ide_set_irq(s->bus);
1063 }
1064 
1065 static void ide_flush_cache(IDEState *s)
1066 {
1067     if (s->blk == NULL) {
1068         ide_flush_cb(s, 0);
1069         return;
1070     }
1071 
1072     s->status |= BUSY_STAT;
1073     ide_set_retry(s);
1074     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1075 
1076     if (blk_bs(s->blk)) {
1077         s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1078     } else {
1079         /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1080          * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1081          */
1082         ide_flush_cb(s, 0);
1083     }
1084 }
1085 
1086 static void ide_cfata_metadata_inquiry(IDEState *s)
1087 {
1088     uint16_t *p;
1089     uint32_t spd;
1090 
1091     p = (uint16_t *) s->io_buffer;
1092     memset(p, 0, 0x200);
1093     spd = ((s->mdata_size - 1) >> 9) + 1;
1094 
1095     put_le16(p + 0, 0x0001);			/* Data format revision */
1096     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1097     put_le16(p + 2, s->media_changed);		/* Media status */
1098     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1099     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1100     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1101     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1102 }
1103 
1104 static void ide_cfata_metadata_read(IDEState *s)
1105 {
1106     uint16_t *p;
1107 
1108     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1109         s->status = ERR_STAT;
1110         s->error = ABRT_ERR;
1111         return;
1112     }
1113 
1114     p = (uint16_t *) s->io_buffer;
1115     memset(p, 0, 0x200);
1116 
1117     put_le16(p + 0, s->media_changed);		/* Media status */
1118     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1119                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1120                                     s->nsector << 9), 0x200 - 2));
1121 }
1122 
1123 static void ide_cfata_metadata_write(IDEState *s)
1124 {
1125     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1126         s->status = ERR_STAT;
1127         s->error = ABRT_ERR;
1128         return;
1129     }
1130 
1131     s->media_changed = 0;
1132 
1133     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1134                     s->io_buffer + 2,
1135                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1136                                     s->nsector << 9), 0x200 - 2));
1137 }
1138 
1139 /* called when the inserted state of the media has changed */
1140 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1141 {
1142     IDEState *s = opaque;
1143     uint64_t nb_sectors;
1144 
1145     s->tray_open = !load;
1146     blk_get_geometry(s->blk, &nb_sectors);
1147     s->nb_sectors = nb_sectors;
1148 
1149     /*
1150      * First indicate to the guest that a CD has been removed.  That's
1151      * done on the next command the guest sends us.
1152      *
1153      * Then we set UNIT_ATTENTION, by which the guest will
1154      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1155      */
1156     s->cdrom_changed = 1;
1157     s->events.new_media = true;
1158     s->events.eject_request = false;
1159     ide_set_irq(s->bus);
1160 }
1161 
1162 static void ide_cd_eject_request_cb(void *opaque, bool force)
1163 {
1164     IDEState *s = opaque;
1165 
1166     s->events.eject_request = true;
1167     if (force) {
1168         s->tray_locked = false;
1169     }
1170     ide_set_irq(s->bus);
1171 }
1172 
1173 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1174 {
1175     s->lba48 = lba48;
1176 
1177     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1178      * fiddling with the rest of the read logic, we just store the
1179      * full sector count in ->nsector and ignore ->hob_nsector from
1180      * now on. */
1181     if (!s->lba48) {
1182         if (!s->nsector)
1183             s->nsector = 256;
1184     } else {
1185         if (!s->nsector && !s->hob_nsector)
1186             s->nsector = 65536;
1187         else {
1188             int lo = s->nsector;
1189             int hi = s->hob_nsector;
1190 
1191             s->nsector = (hi << 8) | lo;
1192         }
1193     }
1194 }
1195 
1196 static void ide_clear_hob(IDEBus *bus)
1197 {
1198     /* any write clears HOB high bit of device control register */
1199     bus->ifs[0].select &= ~(1 << 7);
1200     bus->ifs[1].select &= ~(1 << 7);
1201 }
1202 
1203 /* IOport [W]rite [R]egisters */
1204 enum ATA_IOPORT_WR {
1205     ATA_IOPORT_WR_DATA = 0,
1206     ATA_IOPORT_WR_FEATURES = 1,
1207     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1208     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1209     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1210     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1211     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1212     ATA_IOPORT_WR_COMMAND = 7,
1213     ATA_IOPORT_WR_NUM_REGISTERS,
1214 };
1215 
1216 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1217     [ATA_IOPORT_WR_DATA] = "Data",
1218     [ATA_IOPORT_WR_FEATURES] = "Features",
1219     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1220     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1221     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1222     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1223     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1224     [ATA_IOPORT_WR_COMMAND] = "Command"
1225 };
1226 
1227 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1228 {
1229     IDEBus *bus = opaque;
1230     IDEState *s = idebus_active_if(bus);
1231     int reg_num = addr & 7;
1232 
1233     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1234 
1235     /* ignore writes to command block while busy with previous command */
1236     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1237         return;
1238     }
1239 
1240     switch (reg_num) {
1241     case 0:
1242         break;
1243     case ATA_IOPORT_WR_FEATURES:
1244         ide_clear_hob(bus);
1245         /* NOTE: data is written to the two drives */
1246         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1247         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1248         bus->ifs[0].feature = val;
1249         bus->ifs[1].feature = val;
1250         break;
1251     case ATA_IOPORT_WR_SECTOR_COUNT:
1252         ide_clear_hob(bus);
1253         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1254         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1255         bus->ifs[0].nsector = val;
1256         bus->ifs[1].nsector = val;
1257         break;
1258     case ATA_IOPORT_WR_SECTOR_NUMBER:
1259         ide_clear_hob(bus);
1260         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1261         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1262         bus->ifs[0].sector = val;
1263         bus->ifs[1].sector = val;
1264         break;
1265     case ATA_IOPORT_WR_CYLINDER_LOW:
1266         ide_clear_hob(bus);
1267         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1268         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1269         bus->ifs[0].lcyl = val;
1270         bus->ifs[1].lcyl = val;
1271         break;
1272     case ATA_IOPORT_WR_CYLINDER_HIGH:
1273         ide_clear_hob(bus);
1274         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1275         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1276         bus->ifs[0].hcyl = val;
1277         bus->ifs[1].hcyl = val;
1278         break;
1279     case ATA_IOPORT_WR_DEVICE_HEAD:
1280         /* FIXME: HOB readback uses bit 7 */
1281         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1282         bus->ifs[1].select = (val | 0x10) | 0xa0;
1283         /* select drive */
1284         bus->unit = (val >> 4) & 1;
1285         break;
1286     default:
1287     case ATA_IOPORT_WR_COMMAND:
1288         /* command */
1289         ide_exec_cmd(bus, val);
1290         break;
1291     }
1292 }
1293 
1294 static void ide_reset(IDEState *s)
1295 {
1296     trace_ide_reset(s);
1297 
1298     if (s->pio_aiocb) {
1299         blk_aio_cancel(s->pio_aiocb);
1300         s->pio_aiocb = NULL;
1301     }
1302 
1303     if (s->drive_kind == IDE_CFATA)
1304         s->mult_sectors = 0;
1305     else
1306         s->mult_sectors = MAX_MULT_SECTORS;
1307     /* ide regs */
1308     s->feature = 0;
1309     s->error = 0;
1310     s->nsector = 0;
1311     s->sector = 0;
1312     s->lcyl = 0;
1313     s->hcyl = 0;
1314 
1315     /* lba48 */
1316     s->hob_feature = 0;
1317     s->hob_sector = 0;
1318     s->hob_nsector = 0;
1319     s->hob_lcyl = 0;
1320     s->hob_hcyl = 0;
1321 
1322     s->select = 0xa0;
1323     s->status = READY_STAT | SEEK_STAT;
1324 
1325     s->lba48 = 0;
1326 
1327     /* ATAPI specific */
1328     s->sense_key = 0;
1329     s->asc = 0;
1330     s->cdrom_changed = 0;
1331     s->packet_transfer_size = 0;
1332     s->elementary_transfer_size = 0;
1333     s->io_buffer_index = 0;
1334     s->cd_sector_size = 0;
1335     s->atapi_dma = 0;
1336     s->tray_locked = 0;
1337     s->tray_open = 0;
1338     /* ATA DMA state */
1339     s->io_buffer_size = 0;
1340     s->req_nb_sectors = 0;
1341 
1342     ide_set_signature(s);
1343     /* init the transfer handler so that 0xffff is returned on data
1344        accesses */
1345     s->end_transfer_func = ide_dummy_transfer_stop;
1346     ide_dummy_transfer_stop(s);
1347     s->media_changed = 0;
1348 }
1349 
1350 static bool cmd_nop(IDEState *s, uint8_t cmd)
1351 {
1352     return true;
1353 }
1354 
1355 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1356 {
1357     /* Halt PIO (in the DRQ phase), then DMA */
1358     ide_transfer_cancel(s);
1359     ide_cancel_dma_sync(s);
1360 
1361     /* Reset any PIO commands, reset signature, etc */
1362     ide_reset(s);
1363 
1364     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1365      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1366     s->status = 0x00;
1367 
1368     /* Do not overwrite status register */
1369     return false;
1370 }
1371 
1372 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1373 {
1374     switch (s->feature) {
1375     case DSM_TRIM:
1376         if (s->blk) {
1377             ide_sector_start_dma(s, IDE_DMA_TRIM);
1378             return false;
1379         }
1380         break;
1381     }
1382 
1383     ide_abort_command(s);
1384     return true;
1385 }
1386 
1387 static bool cmd_identify(IDEState *s, uint8_t cmd)
1388 {
1389     if (s->blk && s->drive_kind != IDE_CD) {
1390         if (s->drive_kind != IDE_CFATA) {
1391             ide_identify(s);
1392         } else {
1393             ide_cfata_identify(s);
1394         }
1395         s->status = READY_STAT | SEEK_STAT;
1396         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1397         ide_set_irq(s->bus);
1398         return false;
1399     } else {
1400         if (s->drive_kind == IDE_CD) {
1401             ide_set_signature(s);
1402         }
1403         ide_abort_command(s);
1404     }
1405 
1406     return true;
1407 }
1408 
1409 static bool cmd_verify(IDEState *s, uint8_t cmd)
1410 {
1411     bool lba48 = (cmd == WIN_VERIFY_EXT);
1412 
1413     /* do sector number check ? */
1414     ide_cmd_lba48_transform(s, lba48);
1415 
1416     return true;
1417 }
1418 
1419 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1420 {
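    /*
     * A sector count of zero disables Read/Write Multiple; any other
     * value must be a power of two no larger than MAX_MULT_SECTORS or
     * the command is aborted.
     */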
1421     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1422         /* Disable Read and Write Multiple */
1423         s->mult_sectors = 0;
1424     } else if ((s->nsector & 0xff) != 0 &&
1425         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1426          (s->nsector & (s->nsector - 1)) != 0)) {
1427         ide_abort_command(s);
1428     } else {
1429         s->mult_sectors = s->nsector & 0xff;
1430     }
1431 
1432     return true;
1433 }
1434 
1435 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1436 {
1437     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1438 
1439     if (!s->blk || !s->mult_sectors) {
1440         ide_abort_command(s);
1441         return true;
1442     }
1443 
1444     ide_cmd_lba48_transform(s, lba48);
1445     s->req_nb_sectors = s->mult_sectors;
1446     ide_sector_read(s);
1447     return false;
1448 }
1449 
1450 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1451 {
1452     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1453     int n;
1454 
1455     if (!s->blk || !s->mult_sectors) {
1456         ide_abort_command(s);
1457         return true;
1458     }
1459 
1460     ide_cmd_lba48_transform(s, lba48);
1461 
1462     s->req_nb_sectors = s->mult_sectors;
1463     n = MIN(s->nsector, s->req_nb_sectors);
1464 
1465     s->status = SEEK_STAT | READY_STAT;
1466     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1467 
1468     s->media_changed = 1;
1469 
1470     return false;
1471 }
1472 
1473 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1474 {
1475     bool lba48 = (cmd == WIN_READ_EXT);
1476 
1477     if (s->drive_kind == IDE_CD) {
1478         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1479         ide_abort_command(s);
1480         return true;
1481     }
1482 
1483     if (!s->blk) {
1484         ide_abort_command(s);
1485         return true;
1486     }
1487 
1488     ide_cmd_lba48_transform(s, lba48);
1489     s->req_nb_sectors = 1;
1490     ide_sector_read(s);
1491 
1492     return false;
1493 }
1494 
1495 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1496 {
1497     bool lba48 = (cmd == WIN_WRITE_EXT);
1498 
1499     if (!s->blk) {
1500         ide_abort_command(s);
1501         return true;
1502     }
1503 
1504     ide_cmd_lba48_transform(s, lba48);
1505 
1506     s->req_nb_sectors = 1;
1507     s->status = SEEK_STAT | READY_STAT;
1508     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1509 
1510     s->media_changed = 1;
1511 
1512     return false;
1513 }
1514 
1515 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1516 {
1517     bool lba48 = (cmd == WIN_READDMA_EXT);
1518 
1519     if (!s->blk) {
1520         ide_abort_command(s);
1521         return true;
1522     }
1523 
1524     ide_cmd_lba48_transform(s, lba48);
1525     ide_sector_start_dma(s, IDE_DMA_READ);
1526 
1527     return false;
1528 }
1529 
1530 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1531 {
1532     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1533 
1534     if (!s->blk) {
1535         ide_abort_command(s);
1536         return true;
1537     }
1538 
1539     ide_cmd_lba48_transform(s, lba48);
1540     ide_sector_start_dma(s, IDE_DMA_WRITE);
1541 
1542     s->media_changed = 1;
1543 
1544     return false;
1545 }
1546 
1547 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1548 {
1549     ide_flush_cache(s);
1550     return false;
1551 }
1552 
1553 static bool cmd_seek(IDEState *s, uint8_t cmd)
1554 {
1555     /* XXX: Check that seek is within bounds */
1556     return true;
1557 }
1558 
1559 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1560 {
1561     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1562 
1563     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1564     if (s->nb_sectors == 0) {
1565         ide_abort_command(s);
1566         return true;
1567     }
1568 
1569     ide_cmd_lba48_transform(s, lba48);
1570     ide_set_sector(s, s->nb_sectors - 1);
1571 
1572     return true;
1573 }
1574 
1575 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1576 {
1577     s->nsector = 0xff; /* device active or idle */
1578     return true;
1579 }
1580 
1581 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1582 {
1583     uint16_t *identify_data;
1584 
1585     if (!s->blk) {
1586         ide_abort_command(s);
1587         return true;
1588     }
1589 
1590     /* XXX: valid for CDROM ? */
1591     switch (s->feature) {
1592     case 0x02: /* write cache enable */
1593         blk_set_enable_write_cache(s->blk, true);
1594         identify_data = (uint16_t *)s->identify_data;
1595         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1596         return true;
1597     case 0x82: /* write cache disable */
1598         blk_set_enable_write_cache(s->blk, false);
1599         identify_data = (uint16_t *)s->identify_data;
1600         put_le16(identify_data + 85, (1 << 14) | 1);
1601         ide_flush_cache(s);
1602         return false;
1603     case 0xcc: /* reverting to power-on defaults enable */
1604     case 0x66: /* reverting to power-on defaults disable */
1605     case 0xaa: /* read look-ahead enable */
1606     case 0x55: /* read look-ahead disable */
1607     case 0x05: /* set advanced power management mode */
1608     case 0x85: /* disable advanced power management mode */
1609     case 0x69: /* NOP */
1610     case 0x67: /* NOP */
1611     case 0x96: /* NOP */
1612     case 0x9a: /* NOP */
1613     case 0x42: /* enable Automatic Acoustic Mode */
1614     case 0xc2: /* disable Automatic Acoustic Mode */
1615         return true;
1616     case 0x03: /* set transfer mode */
1617         {
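            /*
             * The low three bits of the sector count select the mode
             * number and the upper bits the transfer type.  The matching
             * IDENTIFY words are refreshed so the guest sees the newly
             * selected mode: word 62 for single word DMA, 63 for
             * multiword DMA and 88 for UDMA.
             */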
1618             uint8_t val = s->nsector & 0x07;
1619             identify_data = (uint16_t *)s->identify_data;
1620 
1621             switch (s->nsector >> 3) {
1622             case 0x00: /* pio default */
1623             case 0x01: /* pio mode */
1624                 put_le16(identify_data + 62, 0x07);
1625                 put_le16(identify_data + 63, 0x07);
1626                 put_le16(identify_data + 88, 0x3f);
1627                 break;
1628             case 0x02: /* single word dma mode */
1629                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1630                 put_le16(identify_data + 63, 0x07);
1631                 put_le16(identify_data + 88, 0x3f);
1632                 break;
1633             case 0x04: /* mdma mode */
1634                 put_le16(identify_data + 62, 0x07);
1635                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1636                 put_le16(identify_data + 88, 0x3f);
1637                 break;
1638             case 0x08: /* udma mode */
1639                 put_le16(identify_data + 62, 0x07);
1640                 put_le16(identify_data + 63, 0x07);
1641                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1642                 break;
1643             default:
1644                 goto abort_cmd;
1645             }
1646             return true;
1647         }
1648     }
1649 
1650 abort_cmd:
1651     ide_abort_command(s);
1652     return true;
1653 }
1654 
1655 
1656 /*** ATAPI commands ***/
1657 
1658 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1659 {
1660     ide_atapi_identify(s);
1661     s->status = READY_STAT | SEEK_STAT;
1662     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1663     ide_set_irq(s->bus);
1664     return false;
1665 }
1666 
1667 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1668 {
1669     ide_set_signature(s);
1670 
1671     if (s->drive_kind == IDE_CD) {
1672         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1673                         * devices to return a clear status register
1674                         * with READY_STAT *not* set. */
1675         s->error = 0x01;
1676     } else {
1677         s->status = READY_STAT | SEEK_STAT;
1678         /* The bits of the error register are not as usual for this command!
1679          * They are part of the regular output (this is why ERR_STAT isn't set)
1680          * Device 0 passed, Device 1 passed or not present. */
1681         s->error = 0x01;
1682         ide_set_irq(s->bus);
1683     }
1684 
1685     return false;
1686 }
1687 
1688 static bool cmd_packet(IDEState *s, uint8_t cmd)
1689 {
1690     /* overlapping commands not supported */
1691     if (s->feature & 0x02) {
1692         ide_abort_command(s);
1693         return true;
1694     }
1695 
1696     s->status = READY_STAT | SEEK_STAT;
1697     s->atapi_dma = s->feature & 1;
1698     if (s->atapi_dma) {
1699         s->dma_cmd = IDE_DMA_ATAPI;
1700     }
1701     s->nsector = 1;
1702     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1703                        ide_atapi_cmd);
1704     return false;
1705 }
1706 
1707 
1708 /*** CF-ATA commands ***/
1709 
1710 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1711 {
1712     s->error = 0x09;    /* miscellaneous error */
1713     s->status = READY_STAT | SEEK_STAT;
1714     ide_set_irq(s->bus);
1715 
1716     return false;
1717 }
1718 
1719 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1720 {
1721     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1722      * required for Windows 8 to work with AHCI */
1723 
1724     if (cmd == CFA_WEAR_LEVEL) {
1725         s->nsector = 0;
1726     }
1727 
1728     if (cmd == CFA_ERASE_SECTORS) {
1729         s->media_changed = 1;
1730     }
1731 
1732     return true;
1733 }
1734 
1735 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1736 {
1737     s->status = READY_STAT | SEEK_STAT;
1738 
1739     memset(s->io_buffer, 0, 0x200);
1740     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1741     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1742     s->io_buffer[0x02] = s->select;                 /* Head */
1743     s->io_buffer[0x03] = s->sector;                 /* Sector */
1744     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1745     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1746     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1747     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1748     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1749     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1750     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1751 
1752     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1753     ide_set_irq(s->bus);
1754 
1755     return false;
1756 }
1757 
1758 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1759 {
1760     switch (s->feature) {
1761     case 0x02:  /* Inquiry Metadata Storage */
1762         ide_cfata_metadata_inquiry(s);
1763         break;
1764     case 0x03:  /* Read Metadata Storage */
1765         ide_cfata_metadata_read(s);
1766         break;
1767     case 0x04:  /* Write Metadata Storage */
1768         ide_cfata_metadata_write(s);
1769         break;
1770     default:
1771         ide_abort_command(s);
1772         return true;
1773     }
1774 
1775     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1776     s->status = 0x00; /* NOTE: READY is _not_ set */
1777     ide_set_irq(s->bus);
1778 
1779     return false;
1780 }
1781 
1782 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1783 {
1784     switch (s->feature) {
1785     case 0x01:  /* sense temperature in device */
1786         s->nsector = 0x50;      /* +20 C */
1787         break;
1788     default:
1789         ide_abort_command(s);
1790         return true;
1791     }
1792 
1793     return true;
1794 }
1795 
1796 
1797 /*** SMART commands ***/
1798 
1799 static bool cmd_smart(IDEState *s, uint8_t cmd)
1800 {
1801     int n;
1802 
1803     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1804         goto abort_cmd;
1805     }
1806 
1807     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1808         goto abort_cmd;
1809     }
1810 
1811     switch (s->feature) {
1812     case SMART_DISABLE:
1813         s->smart_enabled = 0;
1814         return true;
1815 
1816     case SMART_ENABLE:
1817         s->smart_enabled = 1;
1818         return true;
1819 
1820     case SMART_ATTR_AUTOSAVE:
1821         switch (s->sector) {
1822         case 0x00:
1823             s->smart_autosave = 0;
1824             break;
1825         case 0xf1:
1826             s->smart_autosave = 1;
1827             break;
1828         default:
1829             goto abort_cmd;
1830         }
1831         return true;
1832 
1833     case SMART_STATUS:
1834         if (!s->smart_errors) {
1835             s->hcyl = 0xc2;
1836             s->lcyl = 0x4f;
1837         } else {
1838             s->hcyl = 0x2c;
1839             s->lcyl = 0xf4;
1840         }
1841         return true;
1842 
1843     case SMART_READ_THRESH:
1844         memset(s->io_buffer, 0, 0x200);
1845         s->io_buffer[0] = 0x01; /* smart struct version */
1846 
1847         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1848             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1849             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1850         }
1851 
1852         /* checksum */
1853         for (n = 0; n < 511; n++) {
1854             s->io_buffer[511] += s->io_buffer[n];
1855         }
1856         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1857 
1858         s->status = READY_STAT | SEEK_STAT;
1859         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1860         ide_set_irq(s->bus);
1861         return false;
1862 
1863     case SMART_READ_DATA:
1864         memset(s->io_buffer, 0, 0x200);
1865         s->io_buffer[0] = 0x01; /* smart struct version */
1866 
1867         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1868             int i;
1869             for (i = 0; i < 11; i++) {
1870                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1871             }
1872         }
1873 
1874         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1875         if (s->smart_selftest_count == 0) {
1876             s->io_buffer[363] = 0;
1877         } else {
1878             s->io_buffer[363] =
1879                 s->smart_selftest_data[3 +
1880                            (s->smart_selftest_count - 1) *
1881                            24];
1882         }
1883         s->io_buffer[364] = 0x20;
1884         s->io_buffer[365] = 0x01;
1885         /* offline data collection capability: execute + self-test */
1886         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1887         s->io_buffer[368] = 0x03; /* smart capability (1) */
1888         s->io_buffer[369] = 0x00; /* smart capability (2) */
1889         s->io_buffer[370] = 0x01; /* error logging supported */
1890         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1891         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1892         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1893 
1894         for (n = 0; n < 511; n++) {
1895             s->io_buffer[511] += s->io_buffer[n];
1896         }
1897         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1898 
1899         s->status = READY_STAT | SEEK_STAT;
1900         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1901         ide_set_irq(s->bus);
1902         return false;
1903 
1904     case SMART_READ_LOG:
1905         switch (s->sector) {
1906         case 0x01: /* summary smart error log */
1907             memset(s->io_buffer, 0, 0x200);
1908             s->io_buffer[0] = 0x01;
1909             s->io_buffer[1] = 0x00; /* no error entries */
1910             s->io_buffer[452] = s->smart_errors & 0xff;
1911             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1912 
1913             for (n = 0; n < 511; n++) {
1914                 s->io_buffer[511] += s->io_buffer[n];
1915             }
1916             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1917             break;
1918         case 0x06: /* smart self test log */
1919             memset(s->io_buffer, 0, 0x200);
1920             s->io_buffer[0] = 0x01;
1921             if (s->smart_selftest_count == 0) {
1922                 s->io_buffer[508] = 0;
1923             } else {
1924                 s->io_buffer[508] = s->smart_selftest_count;
1925                 for (n = 2; n < 506; n++)  {
1926                     s->io_buffer[n] = s->smart_selftest_data[n];
1927                 }
1928             }
1929 
1930             for (n = 0; n < 511; n++) {
1931                 s->io_buffer[511] += s->io_buffer[n];
1932             }
1933             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1934             break;
1935         default:
1936             goto abort_cmd;
1937         }
1938         s->status = READY_STAT | SEEK_STAT;
1939         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1940         ide_set_irq(s->bus);
1941         return false;
1942 
1943     case SMART_EXECUTE_OFFLINE:
1944         switch (s->sector) {
1945         case 0: /* off-line routine */
1946         case 1: /* short self test */
1947         case 2: /* extended self test */
1948             s->smart_selftest_count++;
1949             if (s->smart_selftest_count > 21) {
1950                 s->smart_selftest_count = 1;
1951             }
1952             n = 2 + (s->smart_selftest_count - 1) * 24;
1953             s->smart_selftest_data[n] = s->sector;
1954             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1955             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1956             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1957             break;
1958         default:
1959             goto abort_cmd;
1960         }
1961         return true;
1962     }
1963 
1964 abort_cmd:
1965     ide_abort_command(s);
1966     return true;
1967 }
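
     /*
      * For illustration: the checksum loops above follow the usual SMART
      * convention that the 512-byte data structure sums to zero modulo 256.
      * Bytes 0..510 are accumulated into byte 511, which is then negated, so
      * the result can be verified roughly like this (sketch only, not part of
      * the device model):
      *
      *     uint8_t sum = 0;
      *     for (int i = 0; i < 512; i++) {
      *         sum += s->io_buffer[i];
      *     }
      *     assert(sum == 0);
      */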
1968 
1969 #define HD_OK (1u << IDE_HD)
1970 #define CD_OK (1u << IDE_CD)
1971 #define CFA_OK (1u << IDE_CFATA)
1972 #define HD_CFA_OK (HD_OK | CFA_OK)
1973 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1974 
1975 /* Set the Disk Seek Completed status bit during completion */
1976 #define SET_DSC (1u << 8)
1977 
1978 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1979 static const struct {
1980     /* Returns true if the completion code should be run */
1981     bool (*handler)(IDEState *s, uint8_t cmd);
1982     int flags;
1983 } ide_cmd_table[0x100] = {
1984     /* NOP not implemented, mandatory for CD */
1985     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1986     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1987     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1988     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1989     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1990     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1991     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1992     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1993     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1994     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1995     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1996     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1997     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1998     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1999     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2000     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2001     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2002     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2003     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2004     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2005     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2006     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2007     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2008     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2009     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2010     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2011     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2012     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2013     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2014     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2015     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2016     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2017     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2018     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2019     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2020     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2021     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2022     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2023     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2024     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2025     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2026     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2027     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2028     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2029     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2030     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2031     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2032     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2033     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2034     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2035     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2036     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2037     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2038     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2039     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2040     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2041 };
2042 
2043 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2044 {
2045     return cmd < ARRAY_SIZE(ide_cmd_table)
2046         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2047 }
2048 
2049 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2050 {
2051     IDEState *s;
2052     bool complete;
2053 
2054     s = idebus_active_if(bus);
2055     trace_ide_exec_cmd(bus, s, val);
2056 
2057     /* ignore commands to a non-existent slave */
2058     if (s != bus->ifs && !s->blk) {
2059         return;
2060     }
2061 
2062     /* Only RESET is allowed while BSY and/or DRQ are set,
2063      * and only to ATAPI devices. */
2064     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2065         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2066             return;
2067         }
2068     }
2069 
2070     if (!ide_cmd_permitted(s, val)) {
2071         ide_abort_command(s);
2072         ide_set_irq(s->bus);
2073         return;
2074     }
2075 
2076     s->status = READY_STAT | BUSY_STAT;
2077     s->error = 0;
2078     s->io_buffer_offset = 0;
2079 
2080     complete = ide_cmd_table[val].handler(s, val);
2081     if (complete) {
2082         s->status &= ~BUSY_STAT;
2083         assert(!!s->error == !!(s->status & ERR_STAT));
2084 
2085         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2086             s->status |= SEEK_STAT;
2087         }
2088 
2089         ide_cmd_done(s);
2090         ide_set_irq(s->bus);
2091     }
2092 }
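
     /*
      * Worked example (sketch, not from the original source): for an ATAPI
      * CD-ROM s->drive_kind is IDE_CD, so ide_cmd_permitted() only accepts
      * opcodes whose entry carries CD_OK.  A guest issuing WIN_SMART (flags
      * HD_CFA_OK | SET_DSC) to such a device is rejected before any handler
      * runs, roughly:
      *
      *     if (!ide_cmd_permitted(s, WIN_SMART)) {
      *         ide_abort_command(s);       // (1u << IDE_CD) not in HD_CFA_OK
      *         ide_set_irq(s->bus);
      *     }
      *
      * SET_DSC sits above the per-drive-kind permission bits and only makes
      * ide_exec_cmd() raise SEEK_STAT once a handler completes without error.
      */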
2093 
2094 /* IOport [R]ead [R]egisters */
2095 enum ATA_IOPORT_RR {
2096     ATA_IOPORT_RR_DATA = 0,
2097     ATA_IOPORT_RR_ERROR = 1,
2098     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2099     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2100     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2101     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2102     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2103     ATA_IOPORT_RR_STATUS = 7,
2104     ATA_IOPORT_RR_NUM_REGISTERS,
2105 };
2106 
2107 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2108     [ATA_IOPORT_RR_DATA] = "Data",
2109     [ATA_IOPORT_RR_ERROR] = "Error",
2110     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2111     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2112     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2113     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2114     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2115     [ATA_IOPORT_RR_STATUS] = "Status"
2116 };
2117 
2118 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2119 {
2120     IDEBus *bus = opaque;
2121     IDEState *s = idebus_active_if(bus);
2122     uint32_t reg_num;
2123     int ret, hob;
2124 
2125     reg_num = addr & 7;
2126     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2127     //hob = s->select & (1 << 7);
2128     hob = 0;
2129     switch (reg_num) {
2130     case ATA_IOPORT_RR_DATA:
2131         ret = 0xff;
2132         break;
2133     case ATA_IOPORT_RR_ERROR:
2134         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2135             (s != bus->ifs && !s->blk)) {
2136             ret = 0;
2137         } else if (!hob) {
2138             ret = s->error;
2139         } else {
2140             ret = s->hob_feature;
2141         }
2142         break;
2143     case ATA_IOPORT_RR_SECTOR_COUNT:
2144         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2145             ret = 0;
2146         } else if (!hob) {
2147             ret = s->nsector & 0xff;
2148         } else {
2149             ret = s->hob_nsector;
2150         }
2151         break;
2152     case ATA_IOPORT_RR_SECTOR_NUMBER:
2153         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2154             ret = 0;
2155         } else if (!hob) {
2156             ret = s->sector;
2157         } else {
2158             ret = s->hob_sector;
2159         }
2160         break;
2161     case ATA_IOPORT_RR_CYLINDER_LOW:
2162         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2163             ret = 0;
2164         } else if (!hob) {
2165             ret = s->lcyl;
2166         } else {
2167             ret = s->hob_lcyl;
2168         }
2169         break;
2170     case ATA_IOPORT_RR_CYLINDER_HIGH:
2171         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2172             ret = 0;
2173         } else if (!hob) {
2174             ret = s->hcyl;
2175         } else {
2176             ret = s->hob_hcyl;
2177         }
2178         break;
2179     case ATA_IOPORT_RR_DEVICE_HEAD:
2180         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2181             ret = 0;
2182         } else {
2183             ret = s->select;
2184         }
2185         break;
2186     default:
2187     case ATA_IOPORT_RR_STATUS:
2188         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2189             (s != bus->ifs && !s->blk)) {
2190             ret = 0;
2191         } else {
2192             ret = s->status;
2193         }
2194         qemu_irq_lower(bus->irq);
2195         break;
2196     }
2197 
2198     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2199     return ret;
2200 }
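
     /*
      * Note on the disabled hob paths above: for 48-bit (LBA48) commands each
      * task-file register is written twice and the previously written value is
      * kept in the matching hob_* ("high order byte") field.  With HOB
      * readback wired up, a set HOB bit would return that shadow value,
      * roughly:
      *
      *     ret = hob ? s->hob_lcyl : s->lcyl;   // sketch of the intended behaviour
      *
      * As the FIXME notes, the select-register bit consulted here is currently
      * always set, so hob is forced to 0 and only the most recent values can
      * be read back.
      */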
2201 
2202 uint32_t ide_status_read(void *opaque, uint32_t addr)
2203 {
2204     IDEBus *bus = opaque;
2205     IDEState *s = idebus_active_if(bus);
2206     int ret;
2207 
2208     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2209         (s != bus->ifs && !s->blk)) {
2210         ret = 0;
2211     } else {
2212         ret = s->status;
2213     }
2214 
2215     trace_ide_status_read(addr, ret, bus, s);
2216     return ret;
2217 }
2218 
2219 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2220 {
2221     IDEBus *bus = opaque;
2222     IDEState *s;
2223     int i;
2224 
2225     trace_ide_cmd_write(addr, val, bus);
2226 
2227     /* common for both drives */
2228     if (!(bus->cmd & IDE_CMD_RESET) &&
2229         (val & IDE_CMD_RESET)) {
2230         /* reset low to high */
2231         for (i = 0; i < 2; i++) {
2232             s = &bus->ifs[i];
2233             s->status = BUSY_STAT | SEEK_STAT;
2234             s->error = 0x01;
2235         }
2236     } else if ((bus->cmd & IDE_CMD_RESET) &&
2237                !(val & IDE_CMD_RESET)) {
2238         /* high to low */
2239         for (i = 0; i < 2; i++) {
2240             s = &bus->ifs[i];
2241             if (s->drive_kind == IDE_CD)
2242                 s->status = 0x00; /* NOTE: READY is _not_ set */
2243             else
2244                 s->status = READY_STAT | SEEK_STAT;
2245             ide_set_signature(s);
2246         }
2247     }
2248 
2249     bus->cmd = val;
2250 }
2251 
2252 /*
2253  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2254  * transferred from the device to the guest), false if it's a PIO in
2255  */
2256 static bool ide_is_pio_out(IDEState *s)
2257 {
2258     if (s->end_transfer_func == ide_sector_write ||
2259         s->end_transfer_func == ide_atapi_cmd) {
2260         return false;
2261     } else if (s->end_transfer_func == ide_sector_read ||
2262                s->end_transfer_func == ide_transfer_stop ||
2263                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2264                s->end_transfer_func == ide_dummy_transfer_stop) {
2265         return true;
2266     }
2267 
2268     abort();
2269 }
2270 
2271 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2272 {
2273     IDEBus *bus = opaque;
2274     IDEState *s = idebus_active_if(bus);
2275     uint8_t *p;
2276 
2277     trace_ide_data_writew(addr, val, bus, s);
2278 
2279     /* PIO data access allowed only when DRQ bit is set. The result of a write
2280      * during PIO out is indeterminate, just ignore it. */
2281     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2282         return;
2283     }
2284 
2285     p = s->data_ptr;
2286     if (p + 2 > s->data_end) {
2287         return;
2288     }
2289 
2290     *(uint16_t *)p = le16_to_cpu(val);
2291     p += 2;
2292     s->data_ptr = p;
2293     if (p >= s->data_end) {
2294         s->status &= ~DRQ_STAT;
2295         s->end_transfer_func(s);
2296     }
2297 }
2298 
2299 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2300 {
2301     IDEBus *bus = opaque;
2302     IDEState *s = idebus_active_if(bus);
2303     uint8_t *p;
2304     int ret;
2305 
2306     /* PIO data access allowed only when DRQ bit is set. The result of a read
2307      * during PIO in is indeterminate, return 0 and don't move forward. */
2308     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2309         return 0;
2310     }
2311 
2312     p = s->data_ptr;
2313     if (p + 2 > s->data_end) {
2314         return 0;
2315     }
2316 
2317     ret = cpu_to_le16(*(uint16_t *)p);
2318     p += 2;
2319     s->data_ptr = p;
2320     if (p >= s->data_end) {
2321         s->status &= ~DRQ_STAT;
2322         s->end_transfer_func(s);
2323     }
2324 
2325     trace_ide_data_readw(addr, ret, bus, s);
2326     return ret;
2327 }
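
     /*
      * Putting the PIO helpers in context (illustrative guest-side sketch;
      * the port numbers assume the legacy primary channel at 0x1f0/0x1f7):
      * after a READ SECTOR(S) command the device raises DRQ and the guest
      * drains the data register with 256 16-bit reads, each of which ends up
      * in ide_data_readw():
      *
      *     while (!(inb(0x1f7) & 0x08)) { }   // wait for DRQ (status bit 3)
      *     for (int i = 0; i < 256; i++) {
      *         buf[i] = inw(0x1f0);           // data register
      *     }
      *
      * Once data_ptr reaches data_end, DRQ is dropped and end_transfer_func()
      * either sets up the next sector or stops the transfer.
      */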
2328 
2329 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2330 {
2331     IDEBus *bus = opaque;
2332     IDEState *s = idebus_active_if(bus);
2333     uint8_t *p;
2334 
2335     trace_ide_data_writel(addr, val, bus, s);
2336 
2337     /* PIO data access allowed only when DRQ bit is set. The result of a write
2338      * during PIO out is indeterminate, just ignore it. */
2339     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2340         return;
2341     }
2342 
2343     p = s->data_ptr;
2344     if (p + 4 > s->data_end) {
2345         return;
2346     }
2347 
2348     *(uint32_t *)p = le32_to_cpu(val);
2349     p += 4;
2350     s->data_ptr = p;
2351     if (p >= s->data_end) {
2352         s->status &= ~DRQ_STAT;
2353         s->end_transfer_func(s);
2354     }
2355 }
2356 
2357 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2358 {
2359     IDEBus *bus = opaque;
2360     IDEState *s = idebus_active_if(bus);
2361     uint8_t *p;
2362     int ret;
2363 
2364     /* PIO data access allowed only when DRQ bit is set. The result of a read
2365      * during PIO in is indeterminate, return 0 and don't move forward. */
2366     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2367         ret = 0;
2368         goto out;
2369     }
2370 
2371     p = s->data_ptr;
2372     if (p + 4 > s->data_end) {
2373         return 0;
2374     }
2375 
2376     ret = cpu_to_le32(*(uint32_t *)p);
2377     p += 4;
2378     s->data_ptr = p;
2379     if (p >= s->data_end) {
2380         s->status &= ~DRQ_STAT;
2381         s->end_transfer_func(s);
2382     }
2383 
2384 out:
2385     trace_ide_data_readl(addr, ret, bus, s);
2386     return ret;
2387 }
2388 
2389 static void ide_dummy_transfer_stop(IDEState *s)
2390 {
2391     s->data_ptr = s->io_buffer;
2392     s->data_end = s->io_buffer;
2393     s->io_buffer[0] = 0xff;
2394     s->io_buffer[1] = 0xff;
2395     s->io_buffer[2] = 0xff;
2396     s->io_buffer[3] = 0xff;
2397 }
2398 
2399 void ide_bus_reset(IDEBus *bus)
2400 {
2401     bus->unit = 0;
2402     bus->cmd = 0;
2403     ide_reset(&bus->ifs[0]);
2404     ide_reset(&bus->ifs[1]);
2405     ide_clear_hob(bus);
2406 
2407     /* pending async DMA */
2408     if (bus->dma->aiocb) {
2409         trace_ide_bus_reset_aio();
2410         blk_aio_cancel(bus->dma->aiocb);
2411         bus->dma->aiocb = NULL;
2412     }
2413 
2414     /* reset dma provider too */
2415     if (bus->dma->ops->reset) {
2416         bus->dma->ops->reset(bus->dma);
2417     }
2418 }
2419 
2420 static bool ide_cd_is_tray_open(void *opaque)
2421 {
2422     return ((IDEState *)opaque)->tray_open;
2423 }
2424 
2425 static bool ide_cd_is_medium_locked(void *opaque)
2426 {
2427     return ((IDEState *)opaque)->tray_locked;
2428 }
2429 
2430 static void ide_resize_cb(void *opaque)
2431 {
2432     IDEState *s = opaque;
2433     uint64_t nb_sectors;
2434 
2435     if (!s->identify_set) {
2436         return;
2437     }
2438 
2439     blk_get_geometry(s->blk, &nb_sectors);
2440     s->nb_sectors = nb_sectors;
2441 
2442     /* Update the identify data buffer. */
2443     if (s->drive_kind == IDE_CFATA) {
2444         ide_cfata_identify_size(s);
2445     } else {
2446         /* IDE_CD uses a different set of callbacks entirely. */
2447         assert(s->drive_kind != IDE_CD);
2448         ide_identify_size(s);
2449     }
2450 }
2451 
2452 static const BlockDevOps ide_cd_block_ops = {
2453     .change_media_cb = ide_cd_change_cb,
2454     .eject_request_cb = ide_cd_eject_request_cb,
2455     .is_tray_open = ide_cd_is_tray_open,
2456     .is_medium_locked = ide_cd_is_medium_locked,
2457 };
2458 
2459 static const BlockDevOps ide_hd_block_ops = {
2460     .resize_cb = ide_resize_cb,
2461 };
2462 
2463 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2464                    const char *version, const char *serial, const char *model,
2465                    uint64_t wwn,
2466                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2467                    int chs_trans, Error **errp)
2468 {
2469     uint64_t nb_sectors;
2470 
2471     s->blk = blk;
2472     s->drive_kind = kind;
2473 
2474     blk_get_geometry(blk, &nb_sectors);
2475     s->cylinders = cylinders;
2476     s->heads = heads;
2477     s->sectors = secs;
2478     s->chs_trans = chs_trans;
2479     s->nb_sectors = nb_sectors;
2480     s->wwn = wwn;
2481     /* The SMART values should be preserved across power cycles
2482        but they aren't.  */
2483     s->smart_enabled = 1;
2484     s->smart_autosave = 1;
2485     s->smart_errors = 0;
2486     s->smart_selftest_count = 0;
2487     if (kind == IDE_CD) {
2488         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2489         blk_set_guest_block_size(blk, 2048);
2490     } else {
2491         if (!blk_is_inserted(s->blk)) {
2492             error_setg(errp, "Device needs media, but drive is empty");
2493             return -1;
2494         }
2495         if (blk_is_read_only(blk)) {
2496             error_setg(errp, "Can't use a read-only drive");
2497             return -1;
2498         }
2499         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2500     }
2501     if (serial) {
2502         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2503     } else {
2504         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2505                  "QM%05d", s->drive_serial);
2506     }
2507     if (model) {
2508         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2509     } else {
2510         switch (kind) {
2511         case IDE_CD:
2512             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2513             break;
2514         case IDE_CFATA:
2515             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2516             break;
2517         default:
2518             strcpy(s->drive_model_str, "QEMU HARDDISK");
2519             break;
2520         }
2521     }
2522 
2523     if (version) {
2524         pstrcpy(s->version, sizeof(s->version), version);
2525     } else {
2526         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2527     }
2528 
2529     ide_reset(s);
2530     blk_iostatus_enable(blk);
2531     return 0;
2532 }
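
     /*
      * Illustrative call (hypothetical variable names, not taken from a real
      * board): a typical hard-disk setup passes the geometry obtained from
      * the block layer and leaves the optional identification strings NULL so
      * the defaults above are used:
      *
      *     Error *err = NULL;
      *     if (ide_init_drive(s, blk, IDE_HD, NULL, NULL, NULL, 0,
      *                        cyls, heads, secs, chs_trans, &err) < 0) {
      *         error_propagate(errp, err);
      *         return;
      *     }
      *
      * On failure the function sets the error and returns -1.
      */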
2533 
2534 static void ide_init1(IDEBus *bus, int unit)
2535 {
2536     static int drive_serial = 1;
2537     IDEState *s = &bus->ifs[unit];
2538 
2539     s->bus = bus;
2540     s->unit = unit;
2541     s->drive_serial = drive_serial++;
2542     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2543     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2544     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2545     memset(s->io_buffer, 0, s->io_buffer_total_len);
2546 
2547     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2548     memset(s->smart_selftest_data, 0, 512);
2549 
2550     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2551                                            ide_sector_write_timer_cb, s);
2552 }
2553 
2554 static int ide_nop_int(IDEDMA *dma, int x)
2555 {
2556     return 0;
2557 }
2558 
2559 static void ide_nop(IDEDMA *dma)
2560 {
2561 }
2562 
2563 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2564 {
2565     return 0;
2566 }
2567 
2568 static const IDEDMAOps ide_dma_nop_ops = {
2569     .prepare_buf    = ide_nop_int32,
2570     .restart_dma    = ide_nop,
2571     .rw_buf         = ide_nop_int,
2572 };
2573 
2574 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2575 {
2576     s->unit = s->bus->retry_unit;
2577     ide_set_sector(s, s->bus->retry_sector_num);
2578     s->nsector = s->bus->retry_nsector;
2579     s->bus->dma->ops->restart_dma(s->bus->dma);
2580     s->io_buffer_size = 0;
2581     s->dma_cmd = dma_cmd;
2582     ide_start_dma(s, ide_dma_cb);
2583 }
2584 
2585 static void ide_restart_bh(void *opaque)
2586 {
2587     IDEBus *bus = opaque;
2588     IDEState *s;
2589     bool is_read;
2590     int error_status;
2591 
2592     qemu_bh_delete(bus->bh);
2593     bus->bh = NULL;
2594 
2595     error_status = bus->error_status;
2596     if (bus->error_status == 0) {
2597         return;
2598     }
2599 
2600     s = idebus_active_if(bus);
2601     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2602 
2603     /* The error status must be cleared before resubmitting the request: The
2604      * request may fail again, and this case can only be distinguished if the
2605      * called function can set a new error status. */
2606     bus->error_status = 0;
2607 
2608     /* The HBA has generically asked to be kicked on retry */
2609     if (error_status & IDE_RETRY_HBA) {
2610         if (s->bus->dma->ops->restart) {
2611             s->bus->dma->ops->restart(s->bus->dma);
2612         }
2613     } else if (IS_IDE_RETRY_DMA(error_status)) {
2614         if (error_status & IDE_RETRY_TRIM) {
2615             ide_restart_dma(s, IDE_DMA_TRIM);
2616         } else {
2617             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2618         }
2619     } else if (IS_IDE_RETRY_PIO(error_status)) {
2620         if (is_read) {
2621             ide_sector_read(s);
2622         } else {
2623             ide_sector_write(s);
2624         }
2625     } else if (error_status & IDE_RETRY_FLUSH) {
2626         ide_flush_cache(s);
2627     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2628         assert(s->end_transfer_func == ide_atapi_cmd);
2629         ide_atapi_dma_restart(s);
2630     } else {
2631         abort();
2632     }
2633 }
2634 
2635 static void ide_restart_cb(void *opaque, int running, RunState state)
2636 {
2637     IDEBus *bus = opaque;
2638 
2639     if (!running)
2640         return;
2641 
2642     if (!bus->bh) {
2643         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2644         qemu_bh_schedule(bus->bh);
2645     }
2646 }
2647 
2648 void ide_register_restart_cb(IDEBus *bus)
2649 {
2650     if (bus->dma->ops->restart_dma) {
2651         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2652     }
2653 }
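
     /*
      * Recovery path overview (descriptive note): when a DMA, PIO, flush or
      * ATAPI request is interrupted, for example by a werror/rerror=stop
      * policy, the retry bits recorded in bus->error_status describe what was
      * in flight.  ide_register_restart_cb() hooks VM state changes; once the
      * guest runs again, ide_restart_bh() decodes those bits and re-issues the
      * request through the matching ide_* helper above.
      */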
2654 
2655 static IDEDMA ide_dma_nop = {
2656     .ops = &ide_dma_nop_ops,
2657     .aiocb = NULL,
2658 };
2659 
2660 void ide_init2(IDEBus *bus, qemu_irq irq)
2661 {
2662     int i;
2663 
2664     for (i = 0; i < 2; i++) {
2665         ide_init1(bus, i);
2666         ide_reset(&bus->ifs[i]);
2667     }
2668     bus->irq = irq;
2669     bus->dma = &ide_dma_nop;
2670 }
2671 
2672 void ide_exit(IDEState *s)
2673 {
2674     timer_del(s->sector_write_timer);
2675     timer_free(s->sector_write_timer);
2676     qemu_vfree(s->smart_selftest_data);
2677     qemu_vfree(s->io_buffer);
2678 }
2679 
2680 static const MemoryRegionPortio ide_portio_list[] = {
2681     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2682     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2683     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2684     PORTIO_END_OF_LIST(),
2685 };
2686 
2687 static const MemoryRegionPortio ide_portio2_list[] = {
2688     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2689     PORTIO_END_OF_LIST(),
2690 };
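
     /*
      * Port layout note: the first list covers the eight command-block
      * registers with byte access at offsets 0..7 and additionally exposes the
      * data register at offset 0 for 16- and 32-bit access; the second list is
      * the single control-block register (alternate status on read, device
      * control on write) that ide_init_ioport() maps at iobase2.
      */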
2691 
2692 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2693 {
2694     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2695        bridge has been set up properly to always register with ISA.  */
2696     isa_register_portio_list(dev, &bus->portio_list,
2697                              iobase, ide_portio_list, bus, "ide");
2698 
2699     if (iobase2) {
2700         isa_register_portio_list(dev, &bus->portio2_list,
2701                                  iobase2, ide_portio2_list, bus, "ide");
2702     }
2703 }
2704 
2705 static bool is_identify_set(void *opaque, int version_id)
2706 {
2707     IDEState *s = opaque;
2708 
2709     return s->identify_set != 0;
2710 }
2711 
2712 static EndTransferFunc* transfer_end_table[] = {
2713         ide_sector_read,
2714         ide_sector_write,
2715         ide_transfer_stop,
2716         ide_atapi_cmd_reply_end,
2717         ide_atapi_cmd,
2718         ide_dummy_transfer_stop,
2719 };
2720 
2721 static int transfer_end_table_idx(EndTransferFunc *fn)
2722 {
2723     int i;
2724 
2725     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2726         if (transfer_end_table[i] == fn)
2727             return i;
2728 
2729     return -1;
2730 }
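
     /*
      * Rationale (descriptive note): end_transfer_func is a host function
      * pointer and cannot be migrated directly, so the PIO vmstate saves its
      * index into transfer_end_table (pre_save) and converts it back on the
      * destination (post_load), falling back to ide_transfer_stop (index 2)
      * when the function is not in the table.
      */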
2731 
2732 static int ide_drive_post_load(void *opaque, int version_id)
2733 {
2734     IDEState *s = opaque;
2735 
2736     if (s->blk && s->identify_set) {
2737         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2738     }
2739     return 0;
2740 }
2741 
2742 static int ide_drive_pio_post_load(void *opaque, int version_id)
2743 {
2744     IDEState *s = opaque;
2745 
2746     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2747         return -EINVAL;
2748     }
2749     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2750     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2751     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2752     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2753 
2754     return 0;
2755 }
2756 
2757 static int ide_drive_pio_pre_save(void *opaque)
2758 {
2759     IDEState *s = opaque;
2760     int idx;
2761 
2762     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2763     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2764 
2765     idx = transfer_end_table_idx(s->end_transfer_func);
2766     if (idx == -1) {
2767         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2768                         __func__);
2769         s->end_transfer_fn_idx = 2;
2770     } else {
2771         s->end_transfer_fn_idx = idx;
2772     }
2773 
2774     return 0;
2775 }
2776 
2777 static bool ide_drive_pio_state_needed(void *opaque)
2778 {
2779     IDEState *s = opaque;
2780 
2781     return ((s->status & DRQ_STAT) != 0)
2782         || (s->bus->error_status & IDE_RETRY_PIO);
2783 }
2784 
2785 static bool ide_tray_state_needed(void *opaque)
2786 {
2787     IDEState *s = opaque;
2788 
2789     return s->tray_open || s->tray_locked;
2790 }
2791 
2792 static bool ide_atapi_gesn_needed(void *opaque)
2793 {
2794     IDEState *s = opaque;
2795 
2796     return s->events.new_media || s->events.eject_request;
2797 }
2798 
2799 static bool ide_error_needed(void *opaque)
2800 {
2801     IDEBus *bus = opaque;
2802 
2803     return (bus->error_status != 0);
2804 }
2805 
2806 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2807 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2808     .name = "ide_drive/atapi/gesn_state",
2809     .version_id = 1,
2810     .minimum_version_id = 1,
2811     .needed = ide_atapi_gesn_needed,
2812     .fields = (VMStateField[]) {
2813         VMSTATE_BOOL(events.new_media, IDEState),
2814         VMSTATE_BOOL(events.eject_request, IDEState),
2815         VMSTATE_END_OF_LIST()
2816     }
2817 };
2818 
2819 static const VMStateDescription vmstate_ide_tray_state = {
2820     .name = "ide_drive/tray_state",
2821     .version_id = 1,
2822     .minimum_version_id = 1,
2823     .needed = ide_tray_state_needed,
2824     .fields = (VMStateField[]) {
2825         VMSTATE_BOOL(tray_open, IDEState),
2826         VMSTATE_BOOL(tray_locked, IDEState),
2827         VMSTATE_END_OF_LIST()
2828     }
2829 };
2830 
2831 static const VMStateDescription vmstate_ide_drive_pio_state = {
2832     .name = "ide_drive/pio_state",
2833     .version_id = 1,
2834     .minimum_version_id = 1,
2835     .pre_save = ide_drive_pio_pre_save,
2836     .post_load = ide_drive_pio_post_load,
2837     .needed = ide_drive_pio_state_needed,
2838     .fields = (VMStateField[]) {
2839         VMSTATE_INT32(req_nb_sectors, IDEState),
2840         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2841 			     vmstate_info_uint8, uint8_t),
2842         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2843         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2844         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2845         VMSTATE_INT32(elementary_transfer_size, IDEState),
2846         VMSTATE_INT32(packet_transfer_size, IDEState),
2847         VMSTATE_END_OF_LIST()
2848     }
2849 };
2850 
2851 const VMStateDescription vmstate_ide_drive = {
2852     .name = "ide_drive",
2853     .version_id = 3,
2854     .minimum_version_id = 0,
2855     .post_load = ide_drive_post_load,
2856     .fields = (VMStateField[]) {
2857         VMSTATE_INT32(mult_sectors, IDEState),
2858         VMSTATE_INT32(identify_set, IDEState),
2859         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2860         VMSTATE_UINT8(feature, IDEState),
2861         VMSTATE_UINT8(error, IDEState),
2862         VMSTATE_UINT32(nsector, IDEState),
2863         VMSTATE_UINT8(sector, IDEState),
2864         VMSTATE_UINT8(lcyl, IDEState),
2865         VMSTATE_UINT8(hcyl, IDEState),
2866         VMSTATE_UINT8(hob_feature, IDEState),
2867         VMSTATE_UINT8(hob_sector, IDEState),
2868         VMSTATE_UINT8(hob_nsector, IDEState),
2869         VMSTATE_UINT8(hob_lcyl, IDEState),
2870         VMSTATE_UINT8(hob_hcyl, IDEState),
2871         VMSTATE_UINT8(select, IDEState),
2872         VMSTATE_UINT8(status, IDEState),
2873         VMSTATE_UINT8(lba48, IDEState),
2874         VMSTATE_UINT8(sense_key, IDEState),
2875         VMSTATE_UINT8(asc, IDEState),
2876         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2877         VMSTATE_END_OF_LIST()
2878     },
2879     .subsections = (const VMStateDescription*[]) {
2880         &vmstate_ide_drive_pio_state,
2881         &vmstate_ide_tray_state,
2882         &vmstate_ide_atapi_gesn_state,
2883         NULL
2884     }
2885 };
2886 
2887 static const VMStateDescription vmstate_ide_error_status = {
2888     .name = "ide_bus/error",
2889     .version_id = 2,
2890     .minimum_version_id = 1,
2891     .needed = ide_error_needed,
2892     .fields = (VMStateField[]) {
2893         VMSTATE_INT32(error_status, IDEBus),
2894         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2895         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2896         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2897         VMSTATE_END_OF_LIST()
2898     }
2899 };
2900 
2901 const VMStateDescription vmstate_ide_bus = {
2902     .name = "ide_bus",
2903     .version_id = 1,
2904     .minimum_version_id = 1,
2905     .fields = (VMStateField[]) {
2906         VMSTATE_UINT8(cmd, IDEBus),
2907         VMSTATE_UINT8(unit, IDEBus),
2908         VMSTATE_END_OF_LIST()
2909     },
2910     .subsections = (const VMStateDescription*[]) {
2911         &vmstate_ide_error_status,
2912         NULL
2913     }
2914 };
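
     /*
      * Migration layout note: the optional pieces of state above are modelled
      * as subsections, which are only put on the wire when their .needed
      * callback returns true (an open or locked tray, a pending PIO transfer,
      * pending ATAPI events, a stored error status).  An idle drive therefore
      * migrates with just the base "ide_drive" / "ide_bus" fields, keeping the
      * stream compatible with destinations that predate a given subsection.
      */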
2915 
2916 void ide_drive_get(DriveInfo **hd, int n)
2917 {
2918     int i;
2919 
2920     for (i = 0; i < n; i++) {
2921         hd[i] = drive_get_by_index(IF_IDE, i);
2922     }
2923 }
2924