xref: /openbmc/qemu/hw/ide/core.c (revision a78b1299)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/pci/pci.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 #include "trace.h"
40 
41 /* These values were based on a Seagate ST3500418AS but have been modified
42    to make more sense in QEMU */
43 static const int smart_attributes[][12] = {
44     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
45     /* raw read error rate*/
46     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
47     /* spin up */
48     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
49     /* start stop count */
50     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
51     /* remapped sectors */
52     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
53     /* power on hours */
54     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55     /* power cycle count */
56     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
57     /* airflow-temperature-celsius */
58     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
59 };
60 
61 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
62     [IDE_DMA_READ] = "DMA READ",
63     [IDE_DMA_WRITE] = "DMA WRITE",
64     [IDE_DMA_TRIM] = "DMA TRIM",
65     [IDE_DMA_ATAPI] = "DMA ATAPI"
66 };
67 
68 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
69 {
70     if ((unsigned)enval < IDE_DMA__COUNT) {
71         return IDE_DMA_CMD_lookup[enval];
72     }
73     return "DMA UNKNOWN CMD";
74 }
75 
76 static void ide_dummy_transfer_stop(IDEState *s);
77 
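/*
 * ATA string fields (serial number, firmware revision, model) are stored
 * with the two bytes of each 16-bit word swapped, hence the str[i ^ 1]
 * below; the destination is space-padded to the requested length.
 */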
78 static void padstr(char *str, const char *src, int len)
79 {
80     int i, v;
81     for(i = 0; i < len; i++) {
82         if (*src)
83             v = *src++;
84         else
85             v = ' ';
86         str[i^1] = v;
87     }
88 }
89 
90 static void put_le16(uint16_t *p, unsigned int v)
91 {
92     *p = cpu_to_le16(v);
93 }
94 
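/*
 * Fill in the capacity words of the IDENTIFY DEVICE data: words 60-61
 * hold the 28-bit addressable sector count, words 100-103 the 48-bit
 * (LBA48) count.
 */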
95 static void ide_identify_size(IDEState *s)
96 {
97     uint16_t *p = (uint16_t *)s->identify_data;
98     put_le16(p + 60, s->nb_sectors);
99     put_le16(p + 61, s->nb_sectors >> 16);
100     put_le16(p + 100, s->nb_sectors);
101     put_le16(p + 101, s->nb_sectors >> 16);
102     put_le16(p + 102, s->nb_sectors >> 32);
103     put_le16(p + 103, s->nb_sectors >> 48);
104 }
105 
106 static void ide_identify(IDEState *s)
107 {
108     uint16_t *p;
109     unsigned int oldsize;
110     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
111 
112     p = (uint16_t *)s->identify_data;
113     if (s->identify_set) {
114         goto fill_buffer;
115     }
116     memset(p, 0, sizeof(s->identify_data));
117 
118     put_le16(p + 0, 0x0040);
119     put_le16(p + 1, s->cylinders);
120     put_le16(p + 3, s->heads);
121     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
122     put_le16(p + 5, 512); /* XXX: retired, remove ? */
123     put_le16(p + 6, s->sectors);
124     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
125     put_le16(p + 20, 3); /* XXX: retired, remove ? */
126     put_le16(p + 21, 512); /* cache size in sectors */
127     put_le16(p + 22, 4); /* ecc bytes */
128     padstr((char *)(p + 23), s->version, 8); /* firmware version */
129     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
130 #if MAX_MULT_SECTORS > 1
131     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
132 #endif
133     put_le16(p + 48, 1); /* dword I/O */
134     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
135     put_le16(p + 51, 0x200); /* PIO transfer cycle */
136     put_le16(p + 52, 0x200); /* DMA transfer cycle */
137     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
138     put_le16(p + 54, s->cylinders);
139     put_le16(p + 55, s->heads);
140     put_le16(p + 56, s->sectors);
141     oldsize = s->cylinders * s->heads * s->sectors;
142     put_le16(p + 57, oldsize);
143     put_le16(p + 58, oldsize >> 16);
144     if (s->mult_sectors)
145         put_le16(p + 59, 0x100 | s->mult_sectors);
146     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
147     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
148     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
149     put_le16(p + 63, 0x07); /* mdma0-2 supported */
150     put_le16(p + 64, 0x03); /* pio3-4 supported */
151     put_le16(p + 65, 120);
152     put_le16(p + 66, 120);
153     put_le16(p + 67, 120);
154     put_le16(p + 68, 120);
155     if (dev && dev->conf.discard_granularity) {
156         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
157     }
158 
159     if (s->ncq_queues) {
160         put_le16(p + 75, s->ncq_queues - 1);
161         /* NCQ supported */
162         put_le16(p + 76, (1 << 8));
163     }
164 
165     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
166     put_le16(p + 81, 0x16); /* conforms to ata5 */
167     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
168     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
169     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
170     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
171     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
172     if (s->wwn) {
173         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
174     } else {
175         put_le16(p + 84, (1 << 14) | 0);
176     }
177     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
178     if (blk_enable_write_cache(s->blk)) {
179         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
180     } else {
181         put_le16(p + 85, (1 << 14) | 1);
182     }
183     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
184     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
185     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
186     if (s->wwn) {
187         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
188     } else {
189         put_le16(p + 87, (1 << 14) | 0);
190     }
191     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
192     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
193     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
194     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
195     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
196     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
197 
198     if (dev && dev->conf.physical_block_size)
199         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
200     if (s->wwn) {
201         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
202         put_le16(p + 108, s->wwn >> 48);
203         put_le16(p + 109, s->wwn >> 32);
204         put_le16(p + 110, s->wwn >> 16);
205         put_le16(p + 111, s->wwn);
206     }
207     if (dev && dev->conf.discard_granularity) {
208         put_le16(p + 169, 1); /* TRIM support */
209     }
210     if (dev) {
211         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
212     }
213 
214     ide_identify_size(s);
215     s->identify_set = 1;
216 
217 fill_buffer:
218     memcpy(s->io_buffer, p, sizeof(s->identify_data));
219 }
220 
221 static void ide_atapi_identify(IDEState *s)
222 {
223     uint16_t *p;
224 
225     p = (uint16_t *)s->identify_data;
226     if (s->identify_set) {
227         goto fill_buffer;
228     }
229     memset(p, 0, sizeof(s->identify_data));
230 
231     /* Removable CDROM, 50us response, 12 byte packets */
232     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
233     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
234     put_le16(p + 20, 3); /* buffer type */
235     put_le16(p + 21, 512); /* cache size in sectors */
236     put_le16(p + 22, 4); /* ecc bytes */
237     padstr((char *)(p + 23), s->version, 8); /* firmware version */
238     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
239     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
240 #ifdef USE_DMA_CDROM
241     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
242     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
243     put_le16(p + 62, 7);  /* single word dma0-2 supported */
244     put_le16(p + 63, 7);  /* mdma0-2 supported */
245 #else
246     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
247     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
248     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
249 #endif
250     put_le16(p + 64, 3); /* pio3-4 supported */
251     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
252     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
253     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
254     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
255 
256     put_le16(p + 71, 30); /* in ns */
257     put_le16(p + 72, 30); /* in ns */
258 
259     if (s->ncq_queues) {
260         put_le16(p + 75, s->ncq_queues - 1);
261         /* NCQ supported */
262         put_le16(p + 76, (1 << 8));
263     }
264 
265     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
266     if (s->wwn) {
267         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
268         put_le16(p + 87, (1 << 8)); /* WWN enabled */
269     }
270 
271 #ifdef USE_DMA_CDROM
272     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
273 #endif
274 
275     if (s->wwn) {
276         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
277         put_le16(p + 108, s->wwn >> 48);
278         put_le16(p + 109, s->wwn >> 32);
279         put_le16(p + 110, s->wwn >> 16);
280         put_le16(p + 111, s->wwn);
281     }
282 
283     s->identify_set = 1;
284 
285 fill_buffer:
286     memcpy(s->io_buffer, p, sizeof(s->identify_data));
287 }
288 
289 static void ide_cfata_identify_size(IDEState *s)
290 {
291     uint16_t *p = (uint16_t *)s->identify_data;
292     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
293     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
294     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
295     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
296 }
297 
298 static void ide_cfata_identify(IDEState *s)
299 {
300     uint16_t *p;
301     uint32_t cur_sec;
302 
303     p = (uint16_t *)s->identify_data;
304     if (s->identify_set) {
305         goto fill_buffer;
306     }
307     memset(p, 0, sizeof(s->identify_data));
308 
309     cur_sec = s->cylinders * s->heads * s->sectors;
310 
311     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
312     put_le16(p + 1, s->cylinders);		/* Default cylinders */
313     put_le16(p + 3, s->heads);			/* Default heads */
314     put_le16(p + 6, s->sectors);		/* Default sectors per track */
315     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
316     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
317     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
318     put_le16(p + 22, 0x0004);			/* ECC bytes */
319     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
320     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
321 #if MAX_MULT_SECTORS > 1
322     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
323 #else
324     put_le16(p + 47, 0x0000);
325 #endif
326     put_le16(p + 49, 0x0f00);			/* Capabilities */
327     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
328     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
329     put_le16(p + 53, 0x0003);			/* Translation params valid */
330     put_le16(p + 54, s->cylinders);		/* Current cylinders */
331     put_le16(p + 55, s->heads);			/* Current heads */
332     put_le16(p + 56, s->sectors);		/* Current sectors */
333     put_le16(p + 57, cur_sec);			/* Current capacity */
334     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
335     if (s->mult_sectors)			/* Multiple sector setting */
336         put_le16(p + 59, 0x100 | s->mult_sectors);
337     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
338     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
339     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
340     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
341     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
342     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
343     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
344     put_le16(p + 82, 0x400c);			/* Command Set supported */
345     put_le16(p + 83, 0x7068);			/* Command Set supported */
346     put_le16(p + 84, 0x4000);			/* Features supported */
347     put_le16(p + 85, 0x000c);			/* Command Set enabled */
348     put_le16(p + 86, 0x7044);			/* Command Set enabled */
349     put_le16(p + 87, 0x4000);			/* Features enabled */
350     put_le16(p + 91, 0x4060);			/* Current APM level */
351     put_le16(p + 129, 0x0002);			/* Current features option */
352     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
353     put_le16(p + 131, 0x0001);			/* Initial power mode */
354     put_le16(p + 132, 0x0000);			/* User signature */
355     put_le16(p + 160, 0x8100);			/* Power requirement */
356     put_le16(p + 161, 0x8001);			/* CF command set */
357 
358     ide_cfata_identify_size(s);
359     s->identify_set = 1;
360 
361 fill_buffer:
362     memcpy(s->io_buffer, p, sizeof(s->identify_data));
363 }
364 
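/*
 * Write the ATA device signature into the task file registers:
 * lcyl/hcyl of 0x14/0xeb identifies a packet (ATAPI) device, 0x00/0x00
 * an ATA disk, and 0xff/0xff is used when no drive is present.
 */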
365 static void ide_set_signature(IDEState *s)
366 {
367     s->select &= 0xf0; /* clear head */
368     /* put signature */
369     s->nsector = 1;
370     s->sector = 1;
371     if (s->drive_kind == IDE_CD) {
372         s->lcyl = 0x14;
373         s->hcyl = 0xeb;
374     } else if (s->blk) {
375         s->lcyl = 0;
376         s->hcyl = 0;
377     } else {
378         s->lcyl = 0xff;
379         s->hcyl = 0xff;
380     }
381 }
382 
383 static bool ide_sect_range_ok(IDEState *s,
384                               uint64_t sector, uint64_t nb_sectors)
385 {
386     uint64_t total_sectors;
387 
388     blk_get_geometry(s->blk, &total_sectors);
389     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
390         return false;
391     }
392     return true;
393 }
394 
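/*
 * State for an in-flight DSM TRIM: "j" indexes the current iovec of the
 * guest-supplied range list, "i" the current 8-byte entry within it, and
 * is_invalid is set when a range falls outside the device so the
 * completion path reports a DMA error instead.
 */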
395 typedef struct TrimAIOCB {
396     BlockAIOCB common;
397     IDEState *s;
398     QEMUBH *bh;
399     int ret;
400     QEMUIOVector *qiov;
401     BlockAIOCB *aiocb;
402     int i, j;
403     bool is_invalid;
404 } TrimAIOCB;
405 
406 static void trim_aio_cancel(BlockAIOCB *acb)
407 {
408     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
409 
410     /* Exit the loop so ide_issue_trim_cb will not continue  */
411     iocb->j = iocb->qiov->niov - 1;
412     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
413 
414     iocb->ret = -ECANCELED;
415 
416     if (iocb->aiocb) {
417         blk_aio_cancel_async(iocb->aiocb);
418         iocb->aiocb = NULL;
419     }
420 }
421 
422 static const AIOCBInfo trim_aiocb_info = {
423     .aiocb_size         = sizeof(TrimAIOCB),
424     .cancel_async       = trim_aio_cancel,
425 };
426 
427 static void ide_trim_bh_cb(void *opaque)
428 {
429     TrimAIOCB *iocb = opaque;
430 
431     if (iocb->is_invalid) {
432         ide_dma_error(iocb->s);
433     } else {
434         iocb->common.cb(iocb->common.opaque, iocb->ret);
435     }
436     qemu_bh_delete(iocb->bh);
437     iocb->bh = NULL;
438     qemu_aio_unref(iocb);
439 }
440 
441 static void ide_issue_trim_cb(void *opaque, int ret)
442 {
443     TrimAIOCB *iocb = opaque;
444     IDEState *s = iocb->s;
445 
446     if (ret >= 0) {
447         while (iocb->j < iocb->qiov->niov) {
448             int j = iocb->j;
449             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
450                 int i = iocb->i;
451                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
452 
453                 /* 6-byte LBA + 2-byte range per entry */
454                 uint64_t entry = le64_to_cpu(buffer[i]);
455                 uint64_t sector = entry & 0x0000ffffffffffffULL;
456                 uint16_t count = entry >> 48;
457 
458                 if (count == 0) {
459                     continue;
460                 }
461 
462                 if (!ide_sect_range_ok(s, sector, count)) {
463                     iocb->is_invalid = true;
464                     goto done;
465                 }
466 
467                 /* Got an entry! Submit and exit.  */
468                 iocb->aiocb = blk_aio_pdiscard(s->blk,
469                                                sector << BDRV_SECTOR_BITS,
470                                                count << BDRV_SECTOR_BITS,
471                                                ide_issue_trim_cb, opaque);
472                 return;
473             }
474 
475             iocb->j++;
476             iocb->i = -1;
477         }
478     } else {
479         iocb->ret = ret;
480     }
481 
482 done:
483     iocb->aiocb = NULL;
484     if (iocb->bh) {
485         qemu_bh_schedule(iocb->bh);
486     }
487 }
488 
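/*
 * Called by the DMA helpers (see IDE_DMA_TRIM in ide_dma_cb): the qiov
 * does not contain data to write but the guest-supplied list of
 * (LBA, sector count) ranges to discard, which ide_issue_trim_cb
 * processes one entry at a time.
 */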
489 BlockAIOCB *ide_issue_trim(
490         int64_t offset, QEMUIOVector *qiov,
491         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
492 {
493     IDEState *s = opaque;
494     TrimAIOCB *iocb;
495 
496     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
497     iocb->s = s;
498     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
499     iocb->ret = 0;
500     iocb->qiov = qiov;
501     iocb->i = -1;
502     iocb->j = 0;
503     iocb->is_invalid = false;
504     ide_issue_trim_cb(iocb, 0);
505     return &iocb->common;
506 }
507 
508 void ide_abort_command(IDEState *s)
509 {
510     ide_transfer_stop(s);
511     s->status = READY_STAT | ERR_STAT;
512     s->error = ABRT_ERR;
513 }
514 
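/*
 * Record the unit, starting sector and sector count of the current
 * request so that it can be restarted later, e.g. after a
 * rerror/werror=stop condition has been resolved.
 */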
515 static void ide_set_retry(IDEState *s)
516 {
517     s->bus->retry_unit = s->unit;
518     s->bus->retry_sector_num = ide_get_sector(s);
519     s->bus->retry_nsector = s->nsector;
520 }
521 
522 static void ide_clear_retry(IDEState *s)
523 {
524     s->bus->retry_unit = -1;
525     s->bus->retry_sector_num = 0;
526     s->bus->retry_nsector = 0;
527 }
528 
529 /* prepare data transfer and tell what to do after */
530 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
531                         EndTransferFunc *end_transfer_func)
532 {
533     s->end_transfer_func = end_transfer_func;
534     s->data_ptr = buf;
535     s->data_end = buf + size;
536     ide_set_retry(s);
537     if (!(s->status & ERR_STAT)) {
538         s->status |= DRQ_STAT;
539     }
540     if (s->bus->dma->ops->start_transfer) {
541         s->bus->dma->ops->start_transfer(s->bus->dma);
542     }
543 }
544 
545 static void ide_cmd_done(IDEState *s)
546 {
547     if (s->bus->dma->ops->cmd_done) {
548         s->bus->dma->ops->cmd_done(s->bus->dma);
549     }
550 }
551 
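/*
 * Common helper for ide_transfer_stop() and ide_transfer_cancel():
 * reset the data pointers, clear DRQ and, if requested, notify the
 * DMA ops that the command has completed.
 */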
552 static void ide_transfer_halt(IDEState *s,
553                               void(*end_transfer_func)(IDEState *),
554                               bool notify)
555 {
556     s->end_transfer_func = end_transfer_func;
557     s->data_ptr = s->io_buffer;
558     s->data_end = s->io_buffer;
559     s->status &= ~DRQ_STAT;
560     if (notify) {
561         ide_cmd_done(s);
562     }
563 }
564 
565 void ide_transfer_stop(IDEState *s)
566 {
567     ide_transfer_halt(s, ide_transfer_stop, true);
568 }
569 
570 static void ide_transfer_cancel(IDEState *s)
571 {
572     ide_transfer_halt(s, ide_transfer_cancel, false);
573 }
574 
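/*
 * Decode the starting sector from the task file: CHS addressing when
 * bit 6 of the select (device/head) register is clear, otherwise LBA28,
 * or LBA48 (using the HOB registers) when the current command enabled it.
 */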
575 int64_t ide_get_sector(IDEState *s)
576 {
577     int64_t sector_num;
578     if (s->select & 0x40) {
579         /* lba */
580 	if (!s->lba48) {
581 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
582 		(s->lcyl << 8) | s->sector;
583 	} else {
584 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
585 		((int64_t) s->hob_lcyl << 32) |
586 		((int64_t) s->hob_sector << 24) |
587 		((int64_t) s->hcyl << 16) |
588 		((int64_t) s->lcyl << 8) | s->sector;
589 	}
590     } else {
591         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
592             (s->select & 0x0f) * s->sectors + (s->sector - 1);
593     }
594     return sector_num;
595 }
596 
597 void ide_set_sector(IDEState *s, int64_t sector_num)
598 {
599     unsigned int cyl, r;
600     if (s->select & 0x40) {
601 	if (!s->lba48) {
602             s->select = (s->select & 0xf0) | (sector_num >> 24);
603             s->hcyl = (sector_num >> 16);
604             s->lcyl = (sector_num >> 8);
605             s->sector = (sector_num);
606 	} else {
607 	    s->sector = sector_num;
608 	    s->lcyl = sector_num >> 8;
609 	    s->hcyl = sector_num >> 16;
610 	    s->hob_sector = sector_num >> 24;
611 	    s->hob_lcyl = sector_num >> 32;
612 	    s->hob_hcyl = sector_num >> 40;
613 	}
614     } else {
615         cyl = sector_num / (s->heads * s->sectors);
616         r = sector_num % (s->heads * s->sectors);
617         s->hcyl = cyl >> 8;
618         s->lcyl = cyl;
619         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
620         s->sector = (r % s->sectors) + 1;
621     }
622 }
623 
624 static void ide_rw_error(IDEState *s) {
625     ide_abort_command(s);
626     ide_set_irq(s->bus);
627 }
628 
629 static void ide_buffered_readv_cb(void *opaque, int ret)
630 {
631     IDEBufferedRequest *req = opaque;
632     if (!req->orphaned) {
633         if (!ret) {
634             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
635                                 req->original_qiov->size);
636         }
637         req->original_cb(req->original_opaque, ret);
638     }
639     QLIST_REMOVE(req, list);
640     qemu_vfree(req->iov.iov_base);
641     g_free(req);
642 }
643 
644 #define MAX_BUFFERED_REQS 16
645 
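/*
 * Perform a read into a private bounce buffer.  If the guest cancels the
 * DMA, the request is only flagged as orphaned (see ide_cancel_dma_sync):
 * it still completes into the bounce buffer, but nothing is copied back
 * and the original callback is not invoked a second time.
 */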
646 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
647                                QEMUIOVector *iov, int nb_sectors,
648                                BlockCompletionFunc *cb, void *opaque)
649 {
650     BlockAIOCB *aioreq;
651     IDEBufferedRequest *req;
652     int c = 0;
653 
654     QLIST_FOREACH(req, &s->buffered_requests, list) {
655         c++;
656     }
657     if (c > MAX_BUFFERED_REQS) {
658         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
659     }
660 
661     req = g_new0(IDEBufferedRequest, 1);
662     req->original_qiov = iov;
663     req->original_cb = cb;
664     req->original_opaque = opaque;
665     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
666     req->iov.iov_len = iov->size;
667     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
668 
669     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
670                             &req->qiov, 0, ide_buffered_readv_cb, req);
671 
672     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
673     return aioreq;
674 }
675 
676 /**
677  * Cancel all pending DMA requests.
678  * Any buffered DMA requests are instantly canceled,
679  * but any pending unbuffered DMA requests must be waited on.
680  */
681 void ide_cancel_dma_sync(IDEState *s)
682 {
683     IDEBufferedRequest *req;
684 
685     /* First invoke the callbacks of all buffered requests
686      * and flag those requests as orphaned. Ideally there
687      * are no unbuffered (Scatter Gather DMA Requests or
688      * write requests) pending and we can avoid draining. */
689     QLIST_FOREACH(req, &s->buffered_requests, list) {
690         if (!req->orphaned) {
691             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
692             req->original_cb(req->original_opaque, -ECANCELED);
693         }
694         req->orphaned = true;
695     }
696 
697     /*
698      * We can't cancel Scatter Gather DMA in the middle of the
699      * operation, or a partial (not full) DMA transfer would reach
700      * the storage, so we wait for completion instead (we behave
701      * as if the DMA had completed by the time the guest tried to
702      * cancel it with bmdma_cmd_writeb with BM_CMD_START not
703      * set).
704      *
705      * In the future we'll be able to safely cancel the I/O if the
706      * whole DMA operation is submitted to disk with a single
707      * aio operation using preadv/pwritev.
708      */
709     if (s->bus->dma->aiocb) {
710         trace_ide_cancel_dma_sync_remaining();
711         blk_drain(s->blk);
712         assert(s->bus->dma->aiocb == NULL);
713     }
714 }
715 
716 static void ide_sector_read(IDEState *s);
717 
718 static void ide_sector_read_cb(void *opaque, int ret)
719 {
720     IDEState *s = opaque;
721     int n;
722 
723     s->pio_aiocb = NULL;
724     s->status &= ~BUSY_STAT;
725 
726     if (ret == -ECANCELED) {
727         return;
728     }
729     if (ret != 0) {
730         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
731                                 IDE_RETRY_READ)) {
732             return;
733         }
734     }
735 
736     block_acct_done(blk_get_stats(s->blk), &s->acct);
737 
738     n = s->nsector;
739     if (n > s->req_nb_sectors) {
740         n = s->req_nb_sectors;
741     }
742 
743     ide_set_sector(s, ide_get_sector(s) + n);
744     s->nsector -= n;
745     /* Allow the guest to read the io_buffer */
746     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
747     ide_set_irq(s->bus);
748 }
749 
750 static void ide_sector_read(IDEState *s)
751 {
752     int64_t sector_num;
753     int n;
754 
755     s->status = READY_STAT | SEEK_STAT;
756     s->error = 0; /* not needed by IDE spec, but needed by Windows */
757     sector_num = ide_get_sector(s);
758     n = s->nsector;
759 
760     if (n == 0) {
761         ide_transfer_stop(s);
762         return;
763     }
764 
765     s->status |= BUSY_STAT;
766 
767     if (n > s->req_nb_sectors) {
768         n = s->req_nb_sectors;
769     }
770 
771     trace_ide_sector_read(sector_num, n);
772 
773     if (!ide_sect_range_ok(s, sector_num, n)) {
774         ide_rw_error(s);
775         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
776         return;
777     }
778 
779     s->iov.iov_base = s->io_buffer;
780     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
781     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
782 
783     block_acct_start(blk_get_stats(s->blk), &s->acct,
784                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
785     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
786                                       ide_sector_read_cb, s);
787 }
788 
789 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
790 {
791     if (s->bus->dma->ops->commit_buf) {
792         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
793     }
794     s->io_buffer_offset += tx_bytes;
795     qemu_sglist_destroy(&s->sg);
796 }
797 
798 void ide_set_inactive(IDEState *s, bool more)
799 {
800     s->bus->dma->aiocb = NULL;
801     ide_clear_retry(s);
802     if (s->bus->dma->ops->set_inactive) {
803         s->bus->dma->ops->set_inactive(s->bus->dma, more);
804     }
805     ide_cmd_done(s);
806 }
807 
808 void ide_dma_error(IDEState *s)
809 {
810     dma_buf_commit(s, 0);
811     ide_abort_command(s);
812     ide_set_inactive(s, false);
813     ide_set_irq(s->bus);
814 }
815 
816 int ide_handle_rw_error(IDEState *s, int error, int op)
817 {
818     bool is_read = (op & IDE_RETRY_READ) != 0;
819     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
820 
821     if (action == BLOCK_ERROR_ACTION_STOP) {
822         assert(s->bus->retry_unit == s->unit);
823         s->bus->error_status = op;
824     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
825         block_acct_failed(blk_get_stats(s->blk), &s->acct);
826         if (IS_IDE_RETRY_DMA(op)) {
827             ide_dma_error(s);
828         } else if (IS_IDE_RETRY_ATAPI(op)) {
829             ide_atapi_io_error(s, -error);
830         } else {
831             ide_rw_error(s);
832         }
833     }
834     blk_error_action(s->blk, action, is_read, error);
835     return action != BLOCK_ERROR_ACTION_IGNORE;
836 }
837 
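/*
 * DMA completion callback: commit the scatter/gather list that just
 * finished, advance the current sector and remaining count, then map the
 * next chunk of PRDs and re-issue itself until nsector reaches zero or
 * the PRD table runs short.
 */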
838 static void ide_dma_cb(void *opaque, int ret)
839 {
840     IDEState *s = opaque;
841     int n;
842     int64_t sector_num;
843     uint64_t offset;
844     bool stay_active = false;
845 
846     if (ret == -ECANCELED) {
847         return;
848     }
849     if (ret < 0) {
850         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
851             s->bus->dma->aiocb = NULL;
852             dma_buf_commit(s, 0);
853             return;
854         }
855     }
856 
857     n = s->io_buffer_size >> 9;
858     if (n > s->nsector) {
859         /* The PRDs were longer than needed for this request. Shorten them so
860          * we don't get a negative remainder. The Active bit must remain set
861          * after the request completes. */
862         n = s->nsector;
863         stay_active = true;
864     }
865 
866     sector_num = ide_get_sector(s);
867     if (n > 0) {
868         assert(n * 512 == s->sg.size);
869         dma_buf_commit(s, s->sg.size);
870         sector_num += n;
871         ide_set_sector(s, sector_num);
872         s->nsector -= n;
873     }
874 
875     /* end of transfer ? */
876     if (s->nsector == 0) {
877         s->status = READY_STAT | SEEK_STAT;
878         ide_set_irq(s->bus);
879         goto eot;
880     }
881 
882     /* launch next transfer */
883     n = s->nsector;
884     s->io_buffer_index = 0;
885     s->io_buffer_size = n * 512;
886     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
887         /* The PRDs were too short. Reset the Active bit, but don't raise an
888          * interrupt. */
889         s->status = READY_STAT | SEEK_STAT;
890         dma_buf_commit(s, 0);
891         goto eot;
892     }
893 
894     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
895 
896     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
897         !ide_sect_range_ok(s, sector_num, n)) {
898         ide_dma_error(s);
899         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
900         return;
901     }
902 
903     offset = sector_num << BDRV_SECTOR_BITS;
904     switch (s->dma_cmd) {
905     case IDE_DMA_READ:
906         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
907                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
908         break;
909     case IDE_DMA_WRITE:
910         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
911                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
912         break;
913     case IDE_DMA_TRIM:
914         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
915                                         &s->sg, offset, BDRV_SECTOR_SIZE,
916                                         ide_issue_trim, s, ide_dma_cb, s,
917                                         DMA_DIRECTION_TO_DEVICE);
918         break;
919     default:
920         abort();
921     }
922     return;
923 
924 eot:
925     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
926         block_acct_done(blk_get_stats(s->blk), &s->acct);
927     }
928     ide_set_inactive(s, stay_active);
929 }
930 
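/*
 * Start a READ/WRITE/TRIM DMA command: begin block accounting for the
 * whole request and hand off to the bus-specific DMA engine via
 * ide_start_dma().
 */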
931 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
932 {
933     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
934     s->io_buffer_size = 0;
935     s->dma_cmd = dma_cmd;
936 
937     switch (dma_cmd) {
938     case IDE_DMA_READ:
939         block_acct_start(blk_get_stats(s->blk), &s->acct,
940                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
941         break;
942     case IDE_DMA_WRITE:
943         block_acct_start(blk_get_stats(s->blk), &s->acct,
944                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
945         break;
946     default:
947         break;
948     }
949 
950     ide_start_dma(s, ide_dma_cb);
951 }
952 
953 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
954 {
955     s->io_buffer_index = 0;
956     ide_set_retry(s);
957     if (s->bus->dma->ops->start_dma) {
958         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
959     }
960 }
961 
962 static void ide_sector_write(IDEState *s);
963 
964 static void ide_sector_write_timer_cb(void *opaque)
965 {
966     IDEState *s = opaque;
967     ide_set_irq(s->bus);
968 }
969 
970 static void ide_sector_write_cb(void *opaque, int ret)
971 {
972     IDEState *s = opaque;
973     int n;
974 
975     if (ret == -ECANCELED) {
976         return;
977     }
978 
979     s->pio_aiocb = NULL;
980     s->status &= ~BUSY_STAT;
981 
982     if (ret != 0) {
983         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
984             return;
985         }
986     }
987 
988     block_acct_done(blk_get_stats(s->blk), &s->acct);
989 
990     n = s->nsector;
991     if (n > s->req_nb_sectors) {
992         n = s->req_nb_sectors;
993     }
994     s->nsector -= n;
995 
996     ide_set_sector(s, ide_get_sector(s) + n);
997     if (s->nsector == 0) {
998         /* no more sectors to write */
999         ide_transfer_stop(s);
1000     } else {
1001         int n1 = s->nsector;
1002         if (n1 > s->req_nb_sectors) {
1003             n1 = s->req_nb_sectors;
1004         }
1005         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1006                            ide_sector_write);
1007     }
1008 
1009     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1010         /* It seems there is a bug in the Windows 2000 installer HDD
1011            IDE driver which fills the disk with empty logs when the
1012            IDE write IRQ comes too early. This hack tries to correct
1013            that at the expense of slower write performance. Use this
1014            option _only_ to install Windows 2000. You must disable it
1015            for normal use. */
1016         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1017                   (NANOSECONDS_PER_SECOND / 1000));
1018     } else {
1019         ide_set_irq(s->bus);
1020     }
1021 }
1022 
1023 static void ide_sector_write(IDEState *s)
1024 {
1025     int64_t sector_num;
1026     int n;
1027 
1028     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1029     sector_num = ide_get_sector(s);
1030 
1031     n = s->nsector;
1032     if (n > s->req_nb_sectors) {
1033         n = s->req_nb_sectors;
1034     }
1035 
1036     trace_ide_sector_write(sector_num, n);
1037 
1038     if (!ide_sect_range_ok(s, sector_num, n)) {
1039         ide_rw_error(s);
1040         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1041         return;
1042     }
1043 
1044     s->iov.iov_base = s->io_buffer;
1045     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1046     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1047 
1048     block_acct_start(blk_get_stats(s->blk), &s->acct,
1049                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1050     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1051                                    &s->qiov, 0, ide_sector_write_cb, s);
1052 }
1053 
1054 static void ide_flush_cb(void *opaque, int ret)
1055 {
1056     IDEState *s = opaque;
1057 
1058     s->pio_aiocb = NULL;
1059 
1060     if (ret == -ECANCELED) {
1061         return;
1062     }
1063     if (ret < 0) {
1064         /* XXX: What sector number to set here? */
1065         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1066             return;
1067         }
1068     }
1069 
1070     if (s->blk) {
1071         block_acct_done(blk_get_stats(s->blk), &s->acct);
1072     }
1073     s->status = READY_STAT | SEEK_STAT;
1074     ide_cmd_done(s);
1075     ide_set_irq(s->bus);
1076 }
1077 
1078 static void ide_flush_cache(IDEState *s)
1079 {
1080     if (s->blk == NULL) {
1081         ide_flush_cb(s, 0);
1082         return;
1083     }
1084 
1085     s->status |= BUSY_STAT;
1086     ide_set_retry(s);
1087     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1088 
1089     if (blk_bs(s->blk)) {
1090         s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1091     } else {
1092         /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1093          * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1094          */
1095         ide_flush_cb(s, 0);
1096     }
1097 }
1098 
1099 static void ide_cfata_metadata_inquiry(IDEState *s)
1100 {
1101     uint16_t *p;
1102     uint32_t spd;
1103 
1104     p = (uint16_t *) s->io_buffer;
1105     memset(p, 0, 0x200);
1106     spd = ((s->mdata_size - 1) >> 9) + 1;
1107 
1108     put_le16(p + 0, 0x0001);			/* Data format revision */
1109     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1110     put_le16(p + 2, s->media_changed);		/* Media status */
1111     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1112     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1113     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1114     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1115 }
1116 
1117 static void ide_cfata_metadata_read(IDEState *s)
1118 {
1119     uint16_t *p;
1120 
1121     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1122         s->status = ERR_STAT;
1123         s->error = ABRT_ERR;
1124         return;
1125     }
1126 
1127     p = (uint16_t *) s->io_buffer;
1128     memset(p, 0, 0x200);
1129 
1130     put_le16(p + 0, s->media_changed);		/* Media status */
1131     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1132                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1133                                     s->nsector << 9), 0x200 - 2));
1134 }
1135 
1136 static void ide_cfata_metadata_write(IDEState *s)
1137 {
1138     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1139         s->status = ERR_STAT;
1140         s->error = ABRT_ERR;
1141         return;
1142     }
1143 
1144     s->media_changed = 0;
1145 
1146     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1147                     s->io_buffer + 2,
1148                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1149                                     s->nsector << 9), 0x200 - 2));
1150 }
1151 
1152 /* called when the inserted state of the media has changed */
1153 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1154 {
1155     IDEState *s = opaque;
1156     uint64_t nb_sectors;
1157 
1158     s->tray_open = !load;
1159     blk_get_geometry(s->blk, &nb_sectors);
1160     s->nb_sectors = nb_sectors;
1161 
1162     /*
1163      * First indicate to the guest that a CD has been removed.  That's
1164      * done on the next command the guest sends us.
1165      *
1166      * Then we set UNIT_ATTENTION, by which the guest will
1167      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1168      */
1169     s->cdrom_changed = 1;
1170     s->events.new_media = true;
1171     s->events.eject_request = false;
1172     ide_set_irq(s->bus);
1173 }
1174 
1175 static void ide_cd_eject_request_cb(void *opaque, bool force)
1176 {
1177     IDEState *s = opaque;
1178 
1179     s->events.eject_request = true;
1180     if (force) {
1181         s->tray_locked = false;
1182     }
1183     ide_set_irq(s->bus);
1184 }
1185 
1186 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1187 {
1188     s->lba48 = lba48;
1189 
1190     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1191      * fiddling with the rest of the read logic, we just store the
1192      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1193      */
1194     if (!s->lba48) {
1195 	if (!s->nsector)
1196 	    s->nsector = 256;
1197     } else {
1198 	if (!s->nsector && !s->hob_nsector)
1199 	    s->nsector = 65536;
1200 	else {
1201 	    int lo = s->nsector;
1202 	    int hi = s->hob_nsector;
1203 
1204 	    s->nsector = (hi << 8) | lo;
1205 	}
1206     }
1207 }
1208 
1209 static void ide_clear_hob(IDEBus *bus)
1210 {
1211     /* any write clears HOB high bit of device control register */
1212     bus->ifs[0].select &= ~(1 << 7);
1213     bus->ifs[1].select &= ~(1 << 7);
1214 }
1215 
1216 /* IOport [W]rite [R]egisters */
1217 enum ATA_IOPORT_WR {
1218     ATA_IOPORT_WR_DATA = 0,
1219     ATA_IOPORT_WR_FEATURES = 1,
1220     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1221     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1222     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1223     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1224     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1225     ATA_IOPORT_WR_COMMAND = 7,
1226     ATA_IOPORT_WR_NUM_REGISTERS,
1227 };
1228 
1229 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1230     [ATA_IOPORT_WR_DATA] = "Data",
1231     [ATA_IOPORT_WR_FEATURES] = "Features",
1232     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1233     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1234     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1235     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1236     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1237     [ATA_IOPORT_WR_COMMAND] = "Command"
1238 };
1239 
1240 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1241 {
1242     IDEBus *bus = opaque;
1243     IDEState *s = idebus_active_if(bus);
1244     int reg_num = addr & 7;
1245 
1246     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1247 
1248     /* ignore writes to command block while busy with previous command */
1249     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1250         return;
1251     }
1252 
1253     switch (reg_num) {
1254     case 0:
1255         break;
1256     case ATA_IOPORT_WR_FEATURES:
1257         ide_clear_hob(bus);
1258         /* NOTE: data is written to the two drives */
1259         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1260         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1261         bus->ifs[0].feature = val;
1262         bus->ifs[1].feature = val;
1263         break;
1264     case ATA_IOPORT_WR_SECTOR_COUNT:
1265 	ide_clear_hob(bus);
1266 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1267 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1268         bus->ifs[0].nsector = val;
1269         bus->ifs[1].nsector = val;
1270         break;
1271     case ATA_IOPORT_WR_SECTOR_NUMBER:
1272 	ide_clear_hob(bus);
1273 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1274 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1275         bus->ifs[0].sector = val;
1276         bus->ifs[1].sector = val;
1277         break;
1278     case ATA_IOPORT_WR_CYLINDER_LOW:
1279 	ide_clear_hob(bus);
1280 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1281 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1282         bus->ifs[0].lcyl = val;
1283         bus->ifs[1].lcyl = val;
1284         break;
1285     case ATA_IOPORT_WR_CYLINDER_HIGH:
1286 	ide_clear_hob(bus);
1287 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1288 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1289         bus->ifs[0].hcyl = val;
1290         bus->ifs[1].hcyl = val;
1291         break;
1292     case ATA_IOPORT_WR_DEVICE_HEAD:
1293 	/* FIXME: HOB readback uses bit 7 */
1294         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1295         bus->ifs[1].select = (val | 0x10) | 0xa0;
1296         /* select drive */
1297         bus->unit = (val >> 4) & 1;
1298         break;
1299     default:
1300     case ATA_IOPORT_WR_COMMAND:
1301         /* command */
1302         ide_exec_cmd(bus, val);
1303         break;
1304     }
1305 }
1306 
1307 static void ide_reset(IDEState *s)
1308 {
1309     trace_ide_reset(s);
1310 
1311     if (s->pio_aiocb) {
1312         blk_aio_cancel(s->pio_aiocb);
1313         s->pio_aiocb = NULL;
1314     }
1315 
1316     if (s->drive_kind == IDE_CFATA)
1317         s->mult_sectors = 0;
1318     else
1319         s->mult_sectors = MAX_MULT_SECTORS;
1320     /* ide regs */
1321     s->feature = 0;
1322     s->error = 0;
1323     s->nsector = 0;
1324     s->sector = 0;
1325     s->lcyl = 0;
1326     s->hcyl = 0;
1327 
1328     /* lba48 */
1329     s->hob_feature = 0;
1330     s->hob_sector = 0;
1331     s->hob_nsector = 0;
1332     s->hob_lcyl = 0;
1333     s->hob_hcyl = 0;
1334 
1335     s->select = 0xa0;
1336     s->status = READY_STAT | SEEK_STAT;
1337 
1338     s->lba48 = 0;
1339 
1340     /* ATAPI specific */
1341     s->sense_key = 0;
1342     s->asc = 0;
1343     s->cdrom_changed = 0;
1344     s->packet_transfer_size = 0;
1345     s->elementary_transfer_size = 0;
1346     s->io_buffer_index = 0;
1347     s->cd_sector_size = 0;
1348     s->atapi_dma = 0;
1349     s->tray_locked = 0;
1350     s->tray_open = 0;
1351     /* ATA DMA state */
1352     s->io_buffer_size = 0;
1353     s->req_nb_sectors = 0;
1354 
1355     ide_set_signature(s);
1356     /* init the transfer handler so that 0xffff is returned on data
1357        accesses */
1358     s->end_transfer_func = ide_dummy_transfer_stop;
1359     ide_dummy_transfer_stop(s);
1360     s->media_changed = 0;
1361 }
1362 
1363 static bool cmd_nop(IDEState *s, uint8_t cmd)
1364 {
1365     return true;
1366 }
1367 
1368 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1369 {
1370     /* Halt PIO (in the DRQ phase), then DMA */
1371     ide_transfer_cancel(s);
1372     ide_cancel_dma_sync(s);
1373 
1374     /* Reset any PIO commands, reset signature, etc */
1375     ide_reset(s);
1376 
1377     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1378      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1379     s->status = 0x00;
1380 
1381     /* Do not overwrite status register */
1382     return false;
1383 }
1384 
1385 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1386 {
1387     switch (s->feature) {
1388     case DSM_TRIM:
1389         if (s->blk) {
1390             ide_sector_start_dma(s, IDE_DMA_TRIM);
1391             return false;
1392         }
1393         break;
1394     }
1395 
1396     ide_abort_command(s);
1397     return true;
1398 }
1399 
1400 static bool cmd_identify(IDEState *s, uint8_t cmd)
1401 {
1402     if (s->blk && s->drive_kind != IDE_CD) {
1403         if (s->drive_kind != IDE_CFATA) {
1404             ide_identify(s);
1405         } else {
1406             ide_cfata_identify(s);
1407         }
1408         s->status = READY_STAT | SEEK_STAT;
1409         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1410         ide_set_irq(s->bus);
1411         return false;
1412     } else {
1413         if (s->drive_kind == IDE_CD) {
1414             ide_set_signature(s);
1415         }
1416         ide_abort_command(s);
1417     }
1418 
1419     return true;
1420 }
1421 
1422 static bool cmd_verify(IDEState *s, uint8_t cmd)
1423 {
1424     bool lba48 = (cmd == WIN_VERIFY_EXT);
1425 
1426     /* do sector number check ? */
1427     ide_cmd_lba48_transform(s, lba48);
1428 
1429     return true;
1430 }
1431 
1432 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1433 {
1434     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1435         /* Disable Read and Write Multiple */
1436         s->mult_sectors = 0;
1437     } else if ((s->nsector & 0xff) != 0 &&
1438         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1439          (s->nsector & (s->nsector - 1)) != 0)) {
1440         ide_abort_command(s);
1441     } else {
1442         s->mult_sectors = s->nsector & 0xff;
1443     }
1444 
1445     return true;
1446 }
1447 
1448 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1449 {
1450     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1451 
1452     if (!s->blk || !s->mult_sectors) {
1453         ide_abort_command(s);
1454         return true;
1455     }
1456 
1457     ide_cmd_lba48_transform(s, lba48);
1458     s->req_nb_sectors = s->mult_sectors;
1459     ide_sector_read(s);
1460     return false;
1461 }
1462 
1463 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1464 {
1465     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1466     int n;
1467 
1468     if (!s->blk || !s->mult_sectors) {
1469         ide_abort_command(s);
1470         return true;
1471     }
1472 
1473     ide_cmd_lba48_transform(s, lba48);
1474 
1475     s->req_nb_sectors = s->mult_sectors;
1476     n = MIN(s->nsector, s->req_nb_sectors);
1477 
1478     s->status = SEEK_STAT | READY_STAT;
1479     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1480 
1481     s->media_changed = 1;
1482 
1483     return false;
1484 }
1485 
1486 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1487 {
1488     bool lba48 = (cmd == WIN_READ_EXT);
1489 
1490     if (s->drive_kind == IDE_CD) {
1491         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1492         ide_abort_command(s);
1493         return true;
1494     }
1495 
1496     if (!s->blk) {
1497         ide_abort_command(s);
1498         return true;
1499     }
1500 
1501     ide_cmd_lba48_transform(s, lba48);
1502     s->req_nb_sectors = 1;
1503     ide_sector_read(s);
1504 
1505     return false;
1506 }
1507 
1508 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1509 {
1510     bool lba48 = (cmd == WIN_WRITE_EXT);
1511 
1512     if (!s->blk) {
1513         ide_abort_command(s);
1514         return true;
1515     }
1516 
1517     ide_cmd_lba48_transform(s, lba48);
1518 
1519     s->req_nb_sectors = 1;
1520     s->status = SEEK_STAT | READY_STAT;
1521     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1522 
1523     s->media_changed = 1;
1524 
1525     return false;
1526 }
1527 
1528 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1529 {
1530     bool lba48 = (cmd == WIN_READDMA_EXT);
1531 
1532     if (!s->blk) {
1533         ide_abort_command(s);
1534         return true;
1535     }
1536 
1537     ide_cmd_lba48_transform(s, lba48);
1538     ide_sector_start_dma(s, IDE_DMA_READ);
1539 
1540     return false;
1541 }
1542 
1543 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1544 {
1545     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1546 
1547     if (!s->blk) {
1548         ide_abort_command(s);
1549         return true;
1550     }
1551 
1552     ide_cmd_lba48_transform(s, lba48);
1553     ide_sector_start_dma(s, IDE_DMA_WRITE);
1554 
1555     s->media_changed = 1;
1556 
1557     return false;
1558 }
1559 
1560 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1561 {
1562     ide_flush_cache(s);
1563     return false;
1564 }
1565 
1566 static bool cmd_seek(IDEState *s, uint8_t cmd)
1567 {
1568     /* XXX: Check that seek is within bounds */
1569     return true;
1570 }
1571 
1572 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1573 {
1574     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1575 
1576     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1577     if (s->nb_sectors == 0) {
1578         ide_abort_command(s);
1579         return true;
1580     }
1581 
1582     ide_cmd_lba48_transform(s, lba48);
1583     ide_set_sector(s, s->nb_sectors - 1);
1584 
1585     return true;
1586 }
1587 
1588 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1589 {
1590     s->nsector = 0xff; /* device active or idle */
1591     return true;
1592 }
1593 
1594 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1595 {
1596     uint16_t *identify_data;
1597 
1598     if (!s->blk) {
1599         ide_abort_command(s);
1600         return true;
1601     }
1602 
1603     /* XXX: valid for CDROM ? */
1604     switch (s->feature) {
1605     case 0x02: /* write cache enable */
1606         blk_set_enable_write_cache(s->blk, true);
1607         identify_data = (uint16_t *)s->identify_data;
1608         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1609         return true;
1610     case 0x82: /* write cache disable */
1611         blk_set_enable_write_cache(s->blk, false);
1612         identify_data = (uint16_t *)s->identify_data;
1613         put_le16(identify_data + 85, (1 << 14) | 1);
1614         ide_flush_cache(s);
1615         return false;
1616     case 0xcc: /* reverting to power-on defaults enable */
1617     case 0x66: /* reverting to power-on defaults disable */
1618     case 0xaa: /* read look-ahead enable */
1619     case 0x55: /* read look-ahead disable */
1620     case 0x05: /* set advanced power management mode */
1621     case 0x85: /* disable advanced power management mode */
1622     case 0x69: /* NOP */
1623     case 0x67: /* NOP */
1624     case 0x96: /* NOP */
1625     case 0x9a: /* NOP */
1626     case 0x42: /* enable Automatic Acoustic Mode */
1627     case 0xc2: /* disable Automatic Acoustic Mode */
1628         return true;
1629     case 0x03: /* set transfer mode */
1630         {
1631             uint8_t val = s->nsector & 0x07;
1632             identify_data = (uint16_t *)s->identify_data;
1633 
1634             switch (s->nsector >> 3) {
1635             case 0x00: /* pio default */
1636             case 0x01: /* pio mode */
1637                 put_le16(identify_data + 62, 0x07);
1638                 put_le16(identify_data + 63, 0x07);
1639                 put_le16(identify_data + 88, 0x3f);
1640                 break;
1641             case 0x02: /* single word dma mode */
1642                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1643                 put_le16(identify_data + 63, 0x07);
1644                 put_le16(identify_data + 88, 0x3f);
1645                 break;
1646             case 0x04: /* mdma mode */
1647                 put_le16(identify_data + 62, 0x07);
1648                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1649                 put_le16(identify_data + 88, 0x3f);
1650                 break;
1651             case 0x08: /* udma mode */
1652                 put_le16(identify_data + 62, 0x07);
1653                 put_le16(identify_data + 63, 0x07);
1654                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1655                 break;
1656             default:
1657                 goto abort_cmd;
1658             }
1659             return true;
1660         }
1661     }
1662 
1663 abort_cmd:
1664     ide_abort_command(s);
1665     return true;
1666 }
1667 
1668 
1669 /*** ATAPI commands ***/
1670 
1671 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1672 {
1673     ide_atapi_identify(s);
1674     s->status = READY_STAT | SEEK_STAT;
1675     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1676     ide_set_irq(s->bus);
1677     return false;
1678 }
1679 
1680 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1681 {
1682     ide_set_signature(s);
1683 
1684     if (s->drive_kind == IDE_CD) {
1685         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1686                         * devices to return a clear status register
1687                         * with READY_STAT *not* set. */
1688         s->error = 0x01;
1689     } else {
1690         s->status = READY_STAT | SEEK_STAT;
1691         /* The bits of the error register are not as usual for this command!
1692          * They are part of the regular output (this is why ERR_STAT isn't set):
1693          * Device 0 passed, Device 1 passed or not present. */
1694         s->error = 0x01;
1695         ide_set_irq(s->bus);
1696     }
1697 
1698     return false;
1699 }
1700 
1701 static bool cmd_packet(IDEState *s, uint8_t cmd)
1702 {
1703     /* overlapping commands not supported */
1704     if (s->feature & 0x02) {
1705         ide_abort_command(s);
1706         return true;
1707     }
1708 
1709     s->status = READY_STAT | SEEK_STAT;
1710     s->atapi_dma = s->feature & 1;
1711     if (s->atapi_dma) {
1712         s->dma_cmd = IDE_DMA_ATAPI;
1713     }
1714     s->nsector = 1;
1715     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1716                        ide_atapi_cmd);
1717     return false;
1718 }
1719 
1720 
1721 /*** CF-ATA commands ***/
1722 
1723 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1724 {
1725     s->error = 0x09;    /* miscellaneous error */
1726     s->status = READY_STAT | SEEK_STAT;
1727     ide_set_irq(s->bus);
1728 
1729     return false;
1730 }
1731 
1732 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1733 {
1734     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1735      * required for Windows 8 to work with AHCI */
1736 
1737     if (cmd == CFA_WEAR_LEVEL) {
1738         s->nsector = 0;
1739     }
1740 
1741     if (cmd == CFA_ERASE_SECTORS) {
1742         s->media_changed = 1;
1743     }
1744 
1745     return true;
1746 }
1747 
1748 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1749 {
1750     s->status = READY_STAT | SEEK_STAT;
1751 
1752     memset(s->io_buffer, 0, 0x200);
1753     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1754     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1755     s->io_buffer[0x02] = s->select;                 /* Head */
1756     s->io_buffer[0x03] = s->sector;                 /* Sector */
1757     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1758     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1759     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1760     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1761     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1762     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1763     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1764 
1765     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1766     ide_set_irq(s->bus);
1767 
1768     return false;
1769 }
1770 
1771 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1772 {
1773     switch (s->feature) {
1774     case 0x02:  /* Inquiry Metadata Storage */
1775         ide_cfata_metadata_inquiry(s);
1776         break;
1777     case 0x03:  /* Read Metadata Storage */
1778         ide_cfata_metadata_read(s);
1779         break;
1780     case 0x04:  /* Write Metadata Storage */
1781         ide_cfata_metadata_write(s);
1782         break;
1783     default:
1784         ide_abort_command(s);
1785         return true;
1786     }
1787 
1788     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1789     s->status = 0x00; /* NOTE: READY is _not_ set */
1790     ide_set_irq(s->bus);
1791 
1792     return false;
1793 }
1794 
1795 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1796 {
1797     switch (s->feature) {
1798     case 0x01:  /* sense temperature in device */
1799         s->nsector = 0x50;      /* +20 C */
1800         break;
1801     default:
1802         ide_abort_command(s);
1803         return true;
1804     }
1805 
1806     return true;
1807 }
1808 
1809 
1810 /*** SMART commands ***/
1811 
1812 static bool cmd_smart(IDEState *s, uint8_t cmd)
1813 {
1814     int n;
1815 
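         /*
          * All SMART subcommands require the key value 0x4f/0xc2 in the
          * Cylinder Low/High registers, and (except for SMART_ENABLE
          * itself) are only accepted while SMART is enabled.
          */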
1816     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1817         goto abort_cmd;
1818     }
1819 
1820     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1821         goto abort_cmd;
1822     }
1823 
1824     switch (s->feature) {
1825     case SMART_DISABLE:
1826         s->smart_enabled = 0;
1827         return true;
1828 
1829     case SMART_ENABLE:
1830         s->smart_enabled = 1;
1831         return true;
1832 
1833     case SMART_ATTR_AUTOSAVE:
1834         switch (s->sector) {
1835         case 0x00:
1836             s->smart_autosave = 0;
1837             break;
1838         case 0xf1:
1839             s->smart_autosave = 1;
1840             break;
1841         default:
1842             goto abort_cmd;
1843         }
1844         return true;
1845 
1846     case SMART_STATUS:
1847         if (!s->smart_errors) {
1848             s->hcyl = 0xc2;
1849             s->lcyl = 0x4f;
1850         } else {
1851             s->hcyl = 0x2c;
1852             s->lcyl = 0xf4;
1853         }
1854         return true;
1855 
1856     case SMART_READ_THRESH:
1857         memset(s->io_buffer, 0, 0x200);
1858         s->io_buffer[0] = 0x01; /* smart struct version */
1859 
1860         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1861             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1862             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1863         }
1864 
1865         /* checksum */
1866         for (n = 0; n < 511; n++) {
1867             s->io_buffer[511] += s->io_buffer[n];
1868         }
1869         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1870 
1871         s->status = READY_STAT | SEEK_STAT;
1872         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1873         ide_set_irq(s->bus);
1874         return false;
1875 
1876     case SMART_READ_DATA:
1877         memset(s->io_buffer, 0, 0x200);
1878         s->io_buffer[0] = 0x01; /* smart struct version */
1879 
1880         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1881             int i;
1882             for (i = 0; i < 11; i++) {
1883                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1884             }
1885         }
1886 
1887         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1888         if (s->smart_selftest_count == 0) {
1889             s->io_buffer[363] = 0;
1890         } else {
1891             s->io_buffer[363] =
1892                 s->smart_selftest_data[3 +
1893                            (s->smart_selftest_count - 1) *
1894                            24];
1895         }
1896         s->io_buffer[364] = 0x20;
1897         s->io_buffer[365] = 0x01;
1898         /* offline data collection capability: execute + self-test */
1899         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1900         s->io_buffer[368] = 0x03; /* smart capability (1) */
1901         s->io_buffer[369] = 0x00; /* smart capability (2) */
1902         s->io_buffer[370] = 0x01; /* error logging supported */
1903         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1904         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1905         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1906 
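             /* checksum: two's complement of the sum of bytes 0..510 */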
1907         for (n = 0; n < 511; n++) {
1908             s->io_buffer[511] += s->io_buffer[n];
1909         }
1910         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1911 
1912         s->status = READY_STAT | SEEK_STAT;
1913         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1914         ide_set_irq(s->bus);
1915         return false;
1916 
1917     case SMART_READ_LOG:
1918         switch (s->sector) {
1919         case 0x01: /* summary smart error log */
1920             memset(s->io_buffer, 0, 0x200);
1921             s->io_buffer[0] = 0x01;
1922             s->io_buffer[1] = 0x00; /* no error entries */
1923             s->io_buffer[452] = s->smart_errors & 0xff;
1924             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1925 
1926             for (n = 0; n < 511; n++) {
1927                 s->io_buffer[511] += s->io_buffer[n];
1928             }
1929             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1930             break;
1931         case 0x06: /* smart self test log */
1932             memset(s->io_buffer, 0, 0x200);
1933             s->io_buffer[0] = 0x01;
1934             if (s->smart_selftest_count == 0) {
1935                 s->io_buffer[508] = 0;
1936             } else {
1937                 s->io_buffer[508] = s->smart_selftest_count;
1938                 for (n = 2; n < 506; n++)  {
1939                     s->io_buffer[n] = s->smart_selftest_data[n];
1940                 }
1941             }
1942 
1943             for (n = 0; n < 511; n++) {
1944                 s->io_buffer[511] += s->io_buffer[n];
1945             }
1946             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1947             break;
1948         default:
1949             goto abort_cmd;
1950         }
1951         s->status = READY_STAT | SEEK_STAT;
1952         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1953         ide_set_irq(s->bus);
1954         return false;
1955 
1956     case SMART_EXECUTE_OFFLINE:
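             /*
              * Record the requested self-test in the self-test log: each
              * entry is 24 bytes and the count wraps back to 1 after 21
              * entries.  SMART READ LOG (log address 0x06) reads this
              * data back.
              */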
1957         switch (s->sector) {
1958         case 0: /* off-line routine */
1959         case 1: /* short self test */
1960         case 2: /* extended self test */
1961             s->smart_selftest_count++;
1962             if (s->smart_selftest_count > 21) {
1963                 s->smart_selftest_count = 1;
1964             }
1965             n = 2 + (s->smart_selftest_count - 1) * 24;
1966             s->smart_selftest_data[n] = s->sector;
1967             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1968             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1969             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1970             break;
1971         default:
1972             goto abort_cmd;
1973         }
1974         return true;
1975     }
1976 
1977 abort_cmd:
1978     ide_abort_command(s);
1979     return true;
1980 }
1981 
1982 #define HD_OK (1u << IDE_HD)
1983 #define CD_OK (1u << IDE_CD)
1984 #define CFA_OK (1u << IDE_CFATA)
1985 #define HD_CFA_OK (HD_OK | CFA_OK)
1986 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1987 
1988 /* Set the Disk Seek Completed status bit during completion */
1989 #define SET_DSC (1u << 8)
1990 
1991 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1992 static const struct {
1993     /* Returns true if the completion code should be run */
1994     bool (*handler)(IDEState *s, uint8_t cmd);
1995     int flags;
1996 } ide_cmd_table[0x100] = {
1997     /* NOP not implemented, mandatory for CD */
1998     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1999     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2000     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2001     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC },
2002     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2003     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2004     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2005     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2006     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2007     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2008     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2009     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2010     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2011     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2012     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2013     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2014     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2015     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2016     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2017     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2018     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2019     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2020     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2021     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2022     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2023     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2024     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2025     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2026     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2027     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2028     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2029     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2030     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2031     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2032     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2033     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2034     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2035     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2036     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2037     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2038     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2039     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2040     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2041     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2042     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2043     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2044     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2045     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2046     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2047     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2048     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2049     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2050     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2051     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2052     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2053     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2054 };
2055 
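     /*
      * A command is permitted when the flags of its table entry include
      * the bit for this drive's kind; opcodes without a table entry have
      * zero flags and are always rejected.
      */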
2056 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2057 {
2058     return cmd < ARRAY_SIZE(ide_cmd_table)
2059         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2060 }
2061 
2062 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2063 {
2064     IDEState *s;
2065     bool complete;
2066 
2067     s = idebus_active_if(bus);
2068     trace_ide_exec_cmd(bus, s, val);
2069 
2070     /* ignore commands to non-existent slave */
2071     if (s != bus->ifs && !s->blk) {
2072         return;
2073     }
2074 
2075     /* Only RESET is allowed while BSY and/or DRQ are set,
2076      * and only to ATAPI devices. */
2077     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2078         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2079             return;
2080         }
2081     }
2082 
2083     if (!ide_cmd_permitted(s, val)) {
2084         ide_abort_command(s);
2085         ide_set_irq(s->bus);
2086         return;
2087     }
2088 
2089     s->status = READY_STAT | BUSY_STAT;
2090     s->error = 0;
2091     s->io_buffer_offset = 0;
2092 
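         /*
          * Handlers return true when the command has already completed;
          * in that case clear BSY, optionally set DSC and raise the
          * completion interrupt here.  Handlers that return false finish
          * the command later, e.g. from a PIO transfer or DMA callback.
          */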
2093     complete = ide_cmd_table[val].handler(s, val);
2094     if (complete) {
2095         s->status &= ~BUSY_STAT;
2096         assert(!!s->error == !!(s->status & ERR_STAT));
2097 
2098         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2099             s->status |= SEEK_STAT;
2100         }
2101 
2102         ide_cmd_done(s);
2103         ide_set_irq(s->bus);
2104     }
2105 }
2106 
2107 /* IOport [R]ead [R]egisters */
2108 enum ATA_IOPORT_RR {
2109     ATA_IOPORT_RR_DATA = 0,
2110     ATA_IOPORT_RR_ERROR = 1,
2111     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2112     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2113     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2114     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2115     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2116     ATA_IOPORT_RR_STATUS = 7,
2117     ATA_IOPORT_RR_NUM_REGISTERS,
2118 };
2119 
2120 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2121     [ATA_IOPORT_RR_DATA] = "Data",
2122     [ATA_IOPORT_RR_ERROR] = "Error",
2123     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2124     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2125     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2126     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2127     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2128     [ATA_IOPORT_RR_STATUS] = "Status"
2129 };
2130 
2131 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2132 {
2133     IDEBus *bus = opaque;
2134     IDEState *s = idebus_active_if(bus);
2135     uint32_t reg_num;
2136     int ret, hob;
2137 
2138     reg_num = addr & 7;
2139     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2140     //hob = s->select & (1 << 7);
2141     hob = 0;
2142     switch (reg_num) {
2143     case ATA_IOPORT_RR_DATA:
2144         ret = 0xff;
2145         break;
2146     case ATA_IOPORT_RR_ERROR:
2147         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2148             (s != bus->ifs && !s->blk)) {
2149             ret = 0;
2150         } else if (!hob) {
2151             ret = s->error;
2152         } else {
2153             ret = s->hob_feature;
2154         }
2155         break;
2156     case ATA_IOPORT_RR_SECTOR_COUNT:
2157         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2158             ret = 0;
2159         } else if (!hob) {
2160             ret = s->nsector & 0xff;
2161         } else {
2162             ret = s->hob_nsector;
2163         }
2164         break;
2165     case ATA_IOPORT_RR_SECTOR_NUMBER:
2166         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2167             ret = 0;
2168         } else if (!hob) {
2169             ret = s->sector;
2170         } else {
2171             ret = s->hob_sector;
2172         }
2173         break;
2174     case ATA_IOPORT_RR_CYLINDER_LOW:
2175         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2176             ret = 0;
2177         } else if (!hob) {
2178             ret = s->lcyl;
2179         } else {
2180             ret = s->hob_lcyl;
2181         }
2182         break;
2183     case ATA_IOPORT_RR_CYLINDER_HIGH:
2184         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2185             ret = 0;
2186         } else if (!hob) {
2187             ret = s->hcyl;
2188         } else {
2189             ret = s->hob_hcyl;
2190         }
2191         break;
2192     case ATA_IOPORT_RR_DEVICE_HEAD:
2193         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2194             ret = 0;
2195         } else {
2196             ret = s->select;
2197         }
2198         break;
2199     default:
2200     case ATA_IOPORT_RR_STATUS:
2201         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2202             (s != bus->ifs && !s->blk)) {
2203             ret = 0;
2204         } else {
2205             ret = s->status;
2206         }
2207         qemu_irq_lower(bus->irq);
2208         break;
2209     }
2210 
2211     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2212     return ret;
2213 }
2214 
2215 uint32_t ide_status_read(void *opaque, uint32_t addr)
2216 {
2217     IDEBus *bus = opaque;
2218     IDEState *s = idebus_active_if(bus);
2219     int ret;
2220 
2221     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2222         (s != bus->ifs && !s->blk)) {
2223         ret = 0;
2224     } else {
2225         ret = s->status;
2226     }
2227 
2228     trace_ide_status_read(addr, ret, bus, s);
2229     return ret;
2230 }
2231 
2232 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2233 {
2234     IDEBus *bus = opaque;
2235     IDEState *s;
2236     int i;
2237 
2238     trace_ide_cmd_write(addr, val, bus);
2239 
2240     /* common for both drives */
2241     if (!(bus->cmd & IDE_CMD_RESET) &&
2242         (val & IDE_CMD_RESET)) {
2243         /* reset low to high */
2244         for (i = 0; i < 2; i++) {
2245             s = &bus->ifs[i];
2246             s->status = BUSY_STAT | SEEK_STAT;
2247             s->error = 0x01;
2248         }
2249     } else if ((bus->cmd & IDE_CMD_RESET) &&
2250                !(val & IDE_CMD_RESET)) {
2251         /* high to low */
2252         for (i = 0; i < 2; i++) {
2253             s = &bus->ifs[i];
2254             if (s->drive_kind == IDE_CD)
2255                 s->status = 0x00; /* NOTE: READY is _not_ set */
2256             else
2257                 s->status = READY_STAT | SEEK_STAT;
2258             ide_set_signature(s);
2259         }
2260     }
2261 
2262     bus->cmd = val;
2263 }
2264 
2265 /*
2266  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2267  * transferred from the device to the guest), false if it's a PIO in
2268  */
2269 static bool ide_is_pio_out(IDEState *s)
2270 {
2271     if (s->end_transfer_func == ide_sector_write ||
2272         s->end_transfer_func == ide_atapi_cmd) {
2273         return false;
2274     } else if (s->end_transfer_func == ide_sector_read ||
2275                s->end_transfer_func == ide_transfer_stop ||
2276                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2277                s->end_transfer_func == ide_dummy_transfer_stop) {
2278         return true;
2279     }
2280 
2281     abort();
2282 }
2283 
2284 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2285 {
2286     IDEBus *bus = opaque;
2287     IDEState *s = idebus_active_if(bus);
2288     uint8_t *p;
2289 
2290     trace_ide_data_writew(addr, val, bus, s);
2291 
2292     /* PIO data access allowed only when DRQ bit is set. The result of a write
2293      * during PIO out is indeterminate, just ignore it. */
2294     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2295         return;
2296     }
2297 
2298     p = s->data_ptr;
2299     if (p + 2 > s->data_end) {
2300         return;
2301     }
2302 
2303     *(uint16_t *)p = le16_to_cpu(val);
2304     p += 2;
2305     s->data_ptr = p;
2306     if (p >= s->data_end) {
2307         s->status &= ~DRQ_STAT;
2308         s->end_transfer_func(s);
2309     }
2310 }
2311 
2312 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2313 {
2314     IDEBus *bus = opaque;
2315     IDEState *s = idebus_active_if(bus);
2316     uint8_t *p;
2317     int ret;
2318 
2319     /* PIO data access allowed only when DRQ bit is set. The result of a read
2320      * during PIO in is indeterminate, return 0 and don't move forward. */
2321     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2322         return 0;
2323     }
2324 
2325     p = s->data_ptr;
2326     if (p + 2 > s->data_end) {
2327         return 0;
2328     }
2329 
2330     ret = cpu_to_le16(*(uint16_t *)p);
2331     p += 2;
2332     s->data_ptr = p;
2333     if (p >= s->data_end) {
2334         s->status &= ~DRQ_STAT;
2335         s->end_transfer_func(s);
2336     }
2337 
2338     trace_ide_data_readw(addr, ret, bus, s);
2339     return ret;
2340 }
2341 
2342 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2343 {
2344     IDEBus *bus = opaque;
2345     IDEState *s = idebus_active_if(bus);
2346     uint8_t *p;
2347 
2348     trace_ide_data_writel(addr, val, bus, s);
2349 
2350     /* PIO data access allowed only when DRQ bit is set. The result of a write
2351      * during PIO out is indeterminate, just ignore it. */
2352     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2353         return;
2354     }
2355 
2356     p = s->data_ptr;
2357     if (p + 4 > s->data_end) {
2358         return;
2359     }
2360 
2361     *(uint32_t *)p = le32_to_cpu(val);
2362     p += 4;
2363     s->data_ptr = p;
2364     if (p >= s->data_end) {
2365         s->status &= ~DRQ_STAT;
2366         s->end_transfer_func(s);
2367     }
2368 }
2369 
2370 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2371 {
2372     IDEBus *bus = opaque;
2373     IDEState *s = idebus_active_if(bus);
2374     uint8_t *p;
2375     int ret;
2376 
2377     /* PIO data access allowed only when DRQ bit is set. The result of a read
2378      * during PIO in is indeterminate, return 0 and don't move forward. */
2379     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2380         ret = 0;
2381         goto out;
2382     }
2383 
2384     p = s->data_ptr;
2385     if (p + 4 > s->data_end) {
2386         return 0;
2387     }
2388 
2389     ret = cpu_to_le32(*(uint32_t *)p);
2390     p += 4;
2391     s->data_ptr = p;
2392     if (p >= s->data_end) {
2393         s->status &= ~DRQ_STAT;
2394         s->end_transfer_func(s);
2395     }
2396 
2397 out:
2398     trace_ide_data_readl(addr, ret, bus, s);
2399     return ret;
2400 }
2401 
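     /*
      * Dummy end-of-transfer handler: rewind the PIO data pointers to the
      * start of io_buffer and preset its first four bytes to 0xff.
      */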
2402 static void ide_dummy_transfer_stop(IDEState *s)
2403 {
2404     s->data_ptr = s->io_buffer;
2405     s->data_end = s->io_buffer;
2406     s->io_buffer[0] = 0xff;
2407     s->io_buffer[1] = 0xff;
2408     s->io_buffer[2] = 0xff;
2409     s->io_buffer[3] = 0xff;
2410 }
2411 
2412 void ide_bus_reset(IDEBus *bus)
2413 {
2414     bus->unit = 0;
2415     bus->cmd = 0;
2416     ide_reset(&bus->ifs[0]);
2417     ide_reset(&bus->ifs[1]);
2418     ide_clear_hob(bus);
2419 
2420     /* pending async DMA */
2421     if (bus->dma->aiocb) {
2422         trace_ide_bus_reset_aio();
2423         blk_aio_cancel(bus->dma->aiocb);
2424         bus->dma->aiocb = NULL;
2425     }
2426 
2427     /* reset dma provider too */
2428     if (bus->dma->ops->reset) {
2429         bus->dma->ops->reset(bus->dma);
2430     }
2431 }
2432 
2433 static bool ide_cd_is_tray_open(void *opaque)
2434 {
2435     return ((IDEState *)opaque)->tray_open;
2436 }
2437 
2438 static bool ide_cd_is_medium_locked(void *opaque)
2439 {
2440     return ((IDEState *)opaque)->tray_locked;
2441 }
2442 
2443 static void ide_resize_cb(void *opaque)
2444 {
2445     IDEState *s = opaque;
2446     uint64_t nb_sectors;
2447 
2448     if (!s->identify_set) {
2449         return;
2450     }
2451 
2452     blk_get_geometry(s->blk, &nb_sectors);
2453     s->nb_sectors = nb_sectors;
2454 
2455     /* Update the identify data buffer. */
2456     if (s->drive_kind == IDE_CFATA) {
2457         ide_cfata_identify_size(s);
2458     } else {
2459         /* IDE_CD uses a different set of callbacks entirely. */
2460         assert(s->drive_kind != IDE_CD);
2461         ide_identify_size(s);
2462     }
2463 }
2464 
2465 static const BlockDevOps ide_cd_block_ops = {
2466     .change_media_cb = ide_cd_change_cb,
2467     .eject_request_cb = ide_cd_eject_request_cb,
2468     .is_tray_open = ide_cd_is_tray_open,
2469     .is_medium_locked = ide_cd_is_medium_locked,
2470 };
2471 
2472 static const BlockDevOps ide_hd_block_ops = {
2473     .resize_cb = ide_resize_cb,
2474 };
2475 
2476 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2477                    const char *version, const char *serial, const char *model,
2478                    uint64_t wwn,
2479                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2480                    int chs_trans, Error **errp)
2481 {
2482     uint64_t nb_sectors;
2483 
2484     s->blk = blk;
2485     s->drive_kind = kind;
2486 
2487     blk_get_geometry(blk, &nb_sectors);
2488     s->cylinders = cylinders;
2489     s->heads = heads;
2490     s->sectors = secs;
2491     s->chs_trans = chs_trans;
2492     s->nb_sectors = nb_sectors;
2493     s->wwn = wwn;
2494     /* The SMART values should be preserved across power cycles
2495        but they aren't.  */
2496     s->smart_enabled = 1;
2497     s->smart_autosave = 1;
2498     s->smart_errors = 0;
2499     s->smart_selftest_count = 0;
2500     if (kind == IDE_CD) {
2501         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2502         blk_set_guest_block_size(blk, 2048);
2503     } else {
2504         if (!blk_is_inserted(s->blk)) {
2505             error_setg(errp, "Device needs media, but drive is empty");
2506             return -1;
2507         }
2508         if (blk_is_read_only(blk)) {
2509             error_setg(errp, "Can't use a read-only drive");
2510             return -1;
2511         }
2512         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2513     }
2514     if (serial) {
2515         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2516     } else {
2517         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2518                  "QM%05d", s->drive_serial);
2519     }
2520     if (model) {
2521         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2522     } else {
2523         switch (kind) {
2524         case IDE_CD:
2525             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2526             break;
2527         case IDE_CFATA:
2528             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2529             break;
2530         default:
2531             strcpy(s->drive_model_str, "QEMU HARDDISK");
2532             break;
2533         }
2534     }
2535 
2536     if (version) {
2537         pstrcpy(s->version, sizeof(s->version), version);
2538     } else {
2539         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2540     }
2541 
2542     ide_reset(s);
2543     blk_iostatus_enable(blk);
2544     return 0;
2545 }
2546 
2547 static void ide_init1(IDEBus *bus, int unit)
2548 {
2549     static int drive_serial = 1;
2550     IDEState *s = &bus->ifs[unit];
2551 
2552     s->bus = bus;
2553     s->unit = unit;
2554     s->drive_serial = drive_serial++;
2555     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2556     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2557     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2558     memset(s->io_buffer, 0, s->io_buffer_total_len);
2559 
2560     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2561     memset(s->smart_selftest_data, 0, 512);
2562 
2563     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2564                                            ide_sector_write_timer_cb, s);
2565 }
2566 
2567 static int ide_nop_int(IDEDMA *dma, int x)
2568 {
2569     return 0;
2570 }
2571 
2572 static void ide_nop(IDEDMA *dma)
2573 {
2574 }
2575 
2576 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2577 {
2578     return 0;
2579 }
2580 
2581 static const IDEDMAOps ide_dma_nop_ops = {
2582     .prepare_buf    = ide_nop_int32,
2583     .restart_dma    = ide_nop,
2584     .rw_buf         = ide_nop_int,
2585 };
2586 
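     /*
      * Restore unit, sector number and sector count from the retry fields
      * saved on the bus, re-arm the DMA provider and restart the transfer
      * with the given command.
      */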
2587 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2588 {
2589     s->unit = s->bus->retry_unit;
2590     ide_set_sector(s, s->bus->retry_sector_num);
2591     s->nsector = s->bus->retry_nsector;
2592     s->bus->dma->ops->restart_dma(s->bus->dma);
2593     s->io_buffer_size = 0;
2594     s->dma_cmd = dma_cmd;
2595     ide_start_dma(s, ide_dma_cb);
2596 }
2597 
2598 static void ide_restart_bh(void *opaque)
2599 {
2600     IDEBus *bus = opaque;
2601     IDEState *s;
2602     bool is_read;
2603     int error_status;
2604 
2605     qemu_bh_delete(bus->bh);
2606     bus->bh = NULL;
2607 
2608     error_status = bus->error_status;
2609     if (bus->error_status == 0) {
2610         return;
2611     }
2612 
2613     s = idebus_active_if(bus);
2614     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2615 
2616     /* The error status must be cleared before resubmitting the request: The
2617      * request may fail again, and this case can only be distinguished if the
2618      * called function can set a new error status. */
2619     bus->error_status = 0;
2620 
2621     /* The HBA has generically asked to be kicked on retry */
2622     if (error_status & IDE_RETRY_HBA) {
2623         if (s->bus->dma->ops->restart) {
2624             s->bus->dma->ops->restart(s->bus->dma);
2625         }
2626     } else if (IS_IDE_RETRY_DMA(error_status)) {
2627         if (error_status & IDE_RETRY_TRIM) {
2628             ide_restart_dma(s, IDE_DMA_TRIM);
2629         } else {
2630             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2631         }
2632     } else if (IS_IDE_RETRY_PIO(error_status)) {
2633         if (is_read) {
2634             ide_sector_read(s);
2635         } else {
2636             ide_sector_write(s);
2637         }
2638     } else if (error_status & IDE_RETRY_FLUSH) {
2639         ide_flush_cache(s);
2640     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2641         assert(s->end_transfer_func == ide_atapi_cmd);
2642         ide_atapi_dma_restart(s);
2643     } else {
2644         abort();
2645     }
2646 }
2647 
2648 static void ide_restart_cb(void *opaque, int running, RunState state)
2649 {
2650     IDEBus *bus = opaque;
2651 
2652     if (!running)
2653         return;
2654 
2655     if (!bus->bh) {
2656         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2657         qemu_bh_schedule(bus->bh);
2658     }
2659 }
2660 
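     /*
      * The run-state handler is only useful when the DMA provider can
      * restart an interrupted transfer, so it is registered only if
      * restart_dma is implemented.
      */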
2661 void ide_register_restart_cb(IDEBus *bus)
2662 {
2663     if (bus->dma->ops->restart_dma) {
2664         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2665     }
2666 }
2667 
2668 static IDEDMA ide_dma_nop = {
2669     .ops = &ide_dma_nop_ops,
2670     .aiocb = NULL,
2671 };
2672 
2673 void ide_init2(IDEBus *bus, qemu_irq irq)
2674 {
2675     int i;
2676 
2677     for (i = 0; i < 2; i++) {
2678         ide_init1(bus, i);
2679         ide_reset(&bus->ifs[i]);
2680     }
2681     bus->irq = irq;
2682     bus->dma = &ide_dma_nop;
2683 }
2684 
2685 void ide_exit(IDEState *s)
2686 {
2687     timer_del(s->sector_write_timer);
2688     timer_free(s->sector_write_timer);
2689     qemu_vfree(s->smart_selftest_data);
2690     qemu_vfree(s->io_buffer);
2691 }
2692 
2693 static const MemoryRegionPortio ide_portio_list[] = {
2694     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2695     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2696     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2697     PORTIO_END_OF_LIST(),
2698 };
2699 
2700 static const MemoryRegionPortio ide_portio2_list[] = {
2701     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2702     PORTIO_END_OF_LIST(),
2703 };
2704 
2705 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2706 {
2707     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2708        bridge has been set up properly to always register with ISA.  */
2709     isa_register_portio_list(dev, &bus->portio_list,
2710                              iobase, ide_portio_list, bus, "ide");
2711 
2712     if (iobase2) {
2713         isa_register_portio_list(dev, &bus->portio2_list,
2714                                  iobase2, ide_portio2_list, bus, "ide");
2715     }
2716 }
2717 
2718 static bool is_identify_set(void *opaque, int version_id)
2719 {
2720     IDEState *s = opaque;
2721 
2722     return s->identify_set != 0;
2723 }
2724 
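     /*
      * End-of-transfer callbacks cannot be migrated as function pointers;
      * pre_save turns end_transfer_func into an index into this table and
      * post_load converts it back (see the pio_state handlers below).
      */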
2725 static EndTransferFunc* transfer_end_table[] = {
2726         ide_sector_read,
2727         ide_sector_write,
2728         ide_transfer_stop,
2729         ide_atapi_cmd_reply_end,
2730         ide_atapi_cmd,
2731         ide_dummy_transfer_stop,
2732 };
2733 
2734 static int transfer_end_table_idx(EndTransferFunc *fn)
2735 {
2736     int i;
2737 
2738     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2739         if (transfer_end_table[i] == fn)
2740             return i;
2741 
2742     return -1;
2743 }
2744 
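     /*
      * After migration, re-apply the write cache setting advertised in
      * the IDENTIFY data to the block backend.
      */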
2745 static int ide_drive_post_load(void *opaque, int version_id)
2746 {
2747     IDEState *s = opaque;
2748 
2749     if (s->blk && s->identify_set) {
2750         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2751     }
2752     return 0;
2753 }
2754 
2755 static int ide_drive_pio_post_load(void *opaque, int version_id)
2756 {
2757     IDEState *s = opaque;
2758 
2759     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2760         return -EINVAL;
2761     }
2762     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2763     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2764     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2765     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2766 
2767     return 0;
2768 }
2769 
2770 static int ide_drive_pio_pre_save(void *opaque)
2771 {
2772     IDEState *s = opaque;
2773     int idx;
2774 
2775     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2776     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2777 
2778     idx = transfer_end_table_idx(s->end_transfer_func);
2779     if (idx == -1) {
2780         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2781                         __func__);
2782         s->end_transfer_fn_idx = 2;
2783     } else {
2784         s->end_transfer_fn_idx = idx;
2785     }
2786 
2787     return 0;
2788 }
2789 
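     /*
      * The PIO subsection is only migrated while a transfer is in flight
      * (DRQ set) or a PIO retry is pending in the bus error status.
      */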
2790 static bool ide_drive_pio_state_needed(void *opaque)
2791 {
2792     IDEState *s = opaque;
2793 
2794     return ((s->status & DRQ_STAT) != 0)
2795         || (s->bus->error_status & IDE_RETRY_PIO);
2796 }
2797 
2798 static bool ide_tray_state_needed(void *opaque)
2799 {
2800     IDEState *s = opaque;
2801 
2802     return s->tray_open || s->tray_locked;
2803 }
2804 
2805 static bool ide_atapi_gesn_needed(void *opaque)
2806 {
2807     IDEState *s = opaque;
2808 
2809     return s->events.new_media || s->events.eject_request;
2810 }
2811 
2812 static bool ide_error_needed(void *opaque)
2813 {
2814     IDEBus *bus = opaque;
2815 
2816     return (bus->error_status != 0);
2817 }
2818 
2819 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2820 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2821     .name = "ide_drive/atapi/gesn_state",
2822     .version_id = 1,
2823     .minimum_version_id = 1,
2824     .needed = ide_atapi_gesn_needed,
2825     .fields = (VMStateField[]) {
2826         VMSTATE_BOOL(events.new_media, IDEState),
2827         VMSTATE_BOOL(events.eject_request, IDEState),
2828         VMSTATE_END_OF_LIST()
2829     }
2830 };
2831 
2832 static const VMStateDescription vmstate_ide_tray_state = {
2833     .name = "ide_drive/tray_state",
2834     .version_id = 1,
2835     .minimum_version_id = 1,
2836     .needed = ide_tray_state_needed,
2837     .fields = (VMStateField[]) {
2838         VMSTATE_BOOL(tray_open, IDEState),
2839         VMSTATE_BOOL(tray_locked, IDEState),
2840         VMSTATE_END_OF_LIST()
2841     }
2842 };
2843 
2844 static const VMStateDescription vmstate_ide_drive_pio_state = {
2845     .name = "ide_drive/pio_state",
2846     .version_id = 1,
2847     .minimum_version_id = 1,
2848     .pre_save = ide_drive_pio_pre_save,
2849     .post_load = ide_drive_pio_post_load,
2850     .needed = ide_drive_pio_state_needed,
2851     .fields = (VMStateField[]) {
2852         VMSTATE_INT32(req_nb_sectors, IDEState),
2853         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2854                              vmstate_info_uint8, uint8_t),
2855         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2856         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2857         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2858         VMSTATE_INT32(elementary_transfer_size, IDEState),
2859         VMSTATE_INT32(packet_transfer_size, IDEState),
2860         VMSTATE_END_OF_LIST()
2861     }
2862 };
2863 
2864 const VMStateDescription vmstate_ide_drive = {
2865     .name = "ide_drive",
2866     .version_id = 3,
2867     .minimum_version_id = 0,
2868     .post_load = ide_drive_post_load,
2869     .fields = (VMStateField[]) {
2870         VMSTATE_INT32(mult_sectors, IDEState),
2871         VMSTATE_INT32(identify_set, IDEState),
2872         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2873         VMSTATE_UINT8(feature, IDEState),
2874         VMSTATE_UINT8(error, IDEState),
2875         VMSTATE_UINT32(nsector, IDEState),
2876         VMSTATE_UINT8(sector, IDEState),
2877         VMSTATE_UINT8(lcyl, IDEState),
2878         VMSTATE_UINT8(hcyl, IDEState),
2879         VMSTATE_UINT8(hob_feature, IDEState),
2880         VMSTATE_UINT8(hob_sector, IDEState),
2881         VMSTATE_UINT8(hob_nsector, IDEState),
2882         VMSTATE_UINT8(hob_lcyl, IDEState),
2883         VMSTATE_UINT8(hob_hcyl, IDEState),
2884         VMSTATE_UINT8(select, IDEState),
2885         VMSTATE_UINT8(status, IDEState),
2886         VMSTATE_UINT8(lba48, IDEState),
2887         VMSTATE_UINT8(sense_key, IDEState),
2888         VMSTATE_UINT8(asc, IDEState),
2889         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2890         VMSTATE_END_OF_LIST()
2891     },
2892     .subsections = (const VMStateDescription*[]) {
2893         &vmstate_ide_drive_pio_state,
2894         &vmstate_ide_tray_state,
2895         &vmstate_ide_atapi_gesn_state,
2896         NULL
2897     }
2898 };
2899 
2900 static const VMStateDescription vmstate_ide_error_status = {
2901     .name = "ide_bus/error",
2902     .version_id = 2,
2903     .minimum_version_id = 1,
2904     .needed = ide_error_needed,
2905     .fields = (VMStateField[]) {
2906         VMSTATE_INT32(error_status, IDEBus),
2907         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2908         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2909         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2910         VMSTATE_END_OF_LIST()
2911     }
2912 };
2913 
2914 const VMStateDescription vmstate_ide_bus = {
2915     .name = "ide_bus",
2916     .version_id = 1,
2917     .minimum_version_id = 1,
2918     .fields = (VMStateField[]) {
2919         VMSTATE_UINT8(cmd, IDEBus),
2920         VMSTATE_UINT8(unit, IDEBus),
2921         VMSTATE_END_OF_LIST()
2922     },
2923     .subsections = (const VMStateDescription*[]) {
2924         &vmstate_ide_error_status,
2925         NULL
2926     }
2927 };
2928 
2929 void ide_drive_get(DriveInfo **hd, int n)
2930 {
2931     int i;
2932 
2933     for (i = 0; i < n; i++) {
2934         hd[i] = drive_get_by_index(IF_IDE, i);
2935     }
2936 }
2937