xref: /openbmc/qemu/hw/ide/core.c (revision 98979cdc)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 #include "qemu/error-report.h"
38 
39 #include "hw/ide/internal.h"
40 #include "trace.h"
41 
42 /* These values were based on a Seagate ST3500418AS but have been modified
43    to make more sense in QEMU */
44 static const int smart_attributes[][12] = {
45     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
46     /* raw read error rate */
47     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
48     /* spin up */
49     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
50     /* start stop count */
51     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
52     /* remapped sectors */
53     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
54     /* power on hours */
55     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* power cycle count */
57     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58     /* airflow-temperature-celsius */
59     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
60 };
61 
62 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
63     [IDE_DMA_READ] = "DMA READ",
64     [IDE_DMA_WRITE] = "DMA WRITE",
65     [IDE_DMA_TRIM] = "DMA TRIM",
66     [IDE_DMA_ATAPI] = "DMA ATAPI"
67 };
68 
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
70 {
71     if ((unsigned)enval < IDE_DMA__COUNT) {
72         return IDE_DMA_CMD_lookup[enval];
73     }
74     return "DMA UNKNOWN CMD";
75 }
76 
77 static void ide_dummy_transfer_stop(IDEState *s);
78 
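/*
 * String fields in ATA IDENTIFY data are stored as 16-bit words with the
 * two characters of each word swapped.  padstr() space-pads the source
 * string to 'len' bytes and performs that swap via the i^1 index.
 */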
79 static void padstr(char *str, const char *src, int len)
80 {
81     int i, v;
82     for(i = 0; i < len; i++) {
83         if (*src)
84             v = *src++;
85         else
86             v = ' ';
87         str[i^1] = v;
88     }
89 }
90 
91 static void put_le16(uint16_t *p, unsigned int v)
92 {
93     *p = cpu_to_le16(v);
94 }
95 
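/*
 * Capacity words of the IDENTIFY data: words 60-61 carry the sector count
 * used for 28-bit LBA, words 100-103 the 48-bit LBA sector count.
 */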
96 static void ide_identify_size(IDEState *s)
97 {
98     uint16_t *p = (uint16_t *)s->identify_data;
99     put_le16(p + 60, s->nb_sectors);
100     put_le16(p + 61, s->nb_sectors >> 16);
101     put_le16(p + 100, s->nb_sectors);
102     put_le16(p + 101, s->nb_sectors >> 16);
103     put_le16(p + 102, s->nb_sectors >> 32);
104     put_le16(p + 103, s->nb_sectors >> 48);
105 }
106 
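/*
 * Build the 512-byte (256-word) IDENTIFY DEVICE response for an ATA disk.
 * The response is generated once, cached in s->identify_data until
 * identify_set is cleared, and copied into io_buffer for the PIO transfer.
 */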
107 static void ide_identify(IDEState *s)
108 {
109     uint16_t *p;
110     unsigned int oldsize;
111     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
112 
113     p = (uint16_t *)s->identify_data;
114     if (s->identify_set) {
115         goto fill_buffer;
116     }
117     memset(p, 0, sizeof(s->identify_data));
118 
119     put_le16(p + 0, 0x0040);
120     put_le16(p + 1, s->cylinders);
121     put_le16(p + 3, s->heads);
122     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
123     put_le16(p + 5, 512); /* XXX: retired, remove ? */
124     put_le16(p + 6, s->sectors);
125     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
126     put_le16(p + 20, 3); /* XXX: retired, remove ? */
127     put_le16(p + 21, 512); /* cache size in sectors */
128     put_le16(p + 22, 4); /* ecc bytes */
129     padstr((char *)(p + 23), s->version, 8); /* firmware version */
130     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
131 #if MAX_MULT_SECTORS > 1
132     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
133 #endif
134     put_le16(p + 48, 1); /* dword I/O */
135     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
136     put_le16(p + 51, 0x200); /* PIO transfer cycle */
137     put_le16(p + 52, 0x200); /* DMA transfer cycle */
138     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
139     put_le16(p + 54, s->cylinders);
140     put_le16(p + 55, s->heads);
141     put_le16(p + 56, s->sectors);
142     oldsize = s->cylinders * s->heads * s->sectors;
143     put_le16(p + 57, oldsize);
144     put_le16(p + 58, oldsize >> 16);
145     if (s->mult_sectors)
146         put_le16(p + 59, 0x100 | s->mult_sectors);
147     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
148     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
149     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
150     put_le16(p + 63, 0x07); /* mdma0-2 supported */
151     put_le16(p + 64, 0x03); /* pio3-4 supported */
152     put_le16(p + 65, 120);
153     put_le16(p + 66, 120);
154     put_le16(p + 67, 120);
155     put_le16(p + 68, 120);
156     if (dev && dev->conf.discard_granularity) {
157         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
158     }
159 
160     if (s->ncq_queues) {
161         put_le16(p + 75, s->ncq_queues - 1);
162         /* NCQ supported */
163         put_le16(p + 76, (1 << 8));
164     }
165 
166     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
167     put_le16(p + 81, 0x16); /* conforms to ata5 */
168     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
169     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
170     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
171     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
172     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
173     if (s->wwn) {
174         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
175     } else {
176         put_le16(p + 84, (1 << 14) | 0);
177     }
178     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
179     if (blk_enable_write_cache(s->blk)) {
180         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
181     } else {
182         put_le16(p + 85, (1 << 14) | 1);
183     }
184     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
185     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
186     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
187     if (s->wwn) {
188         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
189     } else {
190         put_le16(p + 87, (1 << 14) | 0);
191     }
192     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
193     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
194     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
195     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
196     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
197     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
198 
199     if (dev && dev->conf.physical_block_size)
200         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
201     if (s->wwn) {
202         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
203         put_le16(p + 108, s->wwn >> 48);
204         put_le16(p + 109, s->wwn >> 32);
205         put_le16(p + 110, s->wwn >> 16);
206         put_le16(p + 111, s->wwn);
207     }
208     if (dev && dev->conf.discard_granularity) {
209         put_le16(p + 169, 1); /* TRIM support */
210     }
211 
212     ide_identify_size(s);
213     s->identify_set = 1;
214 
215 fill_buffer:
216     memcpy(s->io_buffer, p, sizeof(s->identify_data));
217 }
218 
219 static void ide_atapi_identify(IDEState *s)
220 {
221     uint16_t *p;
222 
223     p = (uint16_t *)s->identify_data;
224     if (s->identify_set) {
225         goto fill_buffer;
226     }
227     memset(p, 0, sizeof(s->identify_data));
228 
229     /* Removable CDROM, 50us response, 12 byte packets */
230     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
231     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
232     put_le16(p + 20, 3); /* buffer type */
233     put_le16(p + 21, 512); /* cache size in sectors */
234     put_le16(p + 22, 4); /* ecc bytes */
235     padstr((char *)(p + 23), s->version, 8); /* firmware version */
236     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
237     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
238 #ifdef USE_DMA_CDROM
239     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
240     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
241     put_le16(p + 62, 7);  /* single word dma0-2 supported */
242     put_le16(p + 63, 7);  /* mdma0-2 supported */
243 #else
244     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
245     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
246     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
247 #endif
248     put_le16(p + 64, 3); /* pio3-4 supported */
249     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
250     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
251     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
252     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
253 
254     put_le16(p + 71, 30); /* in ns */
255     put_le16(p + 72, 30); /* in ns */
256 
257     if (s->ncq_queues) {
258         put_le16(p + 75, s->ncq_queues - 1);
259         /* NCQ supported */
260         put_le16(p + 76, (1 << 8));
261     }
262 
263     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
264     if (s->wwn) {
265         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
266         put_le16(p + 87, (1 << 8)); /* WWN enabled */
267     }
268 
269 #ifdef USE_DMA_CDROM
270     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
271 #endif
272 
273     if (s->wwn) {
274         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
275         put_le16(p + 108, s->wwn >> 48);
276         put_le16(p + 109, s->wwn >> 32);
277         put_le16(p + 110, s->wwn >> 16);
278         put_le16(p + 111, s->wwn);
279     }
280 
281     s->identify_set = 1;
282 
283 fill_buffer:
284     memcpy(s->io_buffer, p, sizeof(s->identify_data));
285 }
286 
287 static void ide_cfata_identify_size(IDEState *s)
288 {
289     uint16_t *p = (uint16_t *)s->identify_data;
290     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
291     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
292     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
293     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
294 }
295 
296 static void ide_cfata_identify(IDEState *s)
297 {
298     uint16_t *p;
299     uint32_t cur_sec;
300 
301     p = (uint16_t *)s->identify_data;
302     if (s->identify_set) {
303         goto fill_buffer;
304     }
305     memset(p, 0, sizeof(s->identify_data));
306 
307     cur_sec = s->cylinders * s->heads * s->sectors;
308 
309     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
310     put_le16(p + 1, s->cylinders);		/* Default cylinders */
311     put_le16(p + 3, s->heads);			/* Default heads */
312     put_le16(p + 6, s->sectors);		/* Default sectors per track */
313     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
314     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
315     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
316     put_le16(p + 22, 0x0004);			/* ECC bytes */
317     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
318     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
319 #if MAX_MULT_SECTORS > 1
320     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
321 #else
322     put_le16(p + 47, 0x0000);
323 #endif
324     put_le16(p + 49, 0x0f00);			/* Capabilities */
325     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
326     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
327     put_le16(p + 53, 0x0003);			/* Translation params valid */
328     put_le16(p + 54, s->cylinders);		/* Current cylinders */
329     put_le16(p + 55, s->heads);			/* Current heads */
330     put_le16(p + 56, s->sectors);		/* Current sectors */
331     put_le16(p + 57, cur_sec);			/* Current capacity */
332     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
333     if (s->mult_sectors)			/* Multiple sector setting */
334         put_le16(p + 59, 0x100 | s->mult_sectors);
335     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
336     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
337     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
338     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
339     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
340     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
341     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
342     put_le16(p + 82, 0x400c);			/* Command Set supported */
343     put_le16(p + 83, 0x7068);			/* Command Set supported */
344     put_le16(p + 84, 0x4000);			/* Features supported */
345     put_le16(p + 85, 0x000c);			/* Command Set enabled */
346     put_le16(p + 86, 0x7044);			/* Command Set enabled */
347     put_le16(p + 87, 0x4000);			/* Features enabled */
348     put_le16(p + 91, 0x4060);			/* Current APM level */
349     put_le16(p + 129, 0x0002);			/* Current features option */
350     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
351     put_le16(p + 131, 0x0001);			/* Initial power mode */
352     put_le16(p + 132, 0x0000);			/* User signature */
353     put_le16(p + 160, 0x8100);			/* Power requirement */
354     put_le16(p + 161, 0x8001);			/* CF command set */
355 
356     ide_cfata_identify_size(s);
357     s->identify_set = 1;
358 
359 fill_buffer:
360     memcpy(s->io_buffer, p, sizeof(s->identify_data));
361 }
362 
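/*
 * Place the device signature in the task-file registers: packet (ATAPI)
 * devices report lcyl/hcyl = 0x14/0xeb, ATA disks report 0x00/0x00, and an
 * empty slot reports 0xff/0xff.
 */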
363 static void ide_set_signature(IDEState *s)
364 {
365     s->select &= 0xf0; /* clear head */
366     /* put signature */
367     s->nsector = 1;
368     s->sector = 1;
369     if (s->drive_kind == IDE_CD) {
370         s->lcyl = 0x14;
371         s->hcyl = 0xeb;
372     } else if (s->blk) {
373         s->lcyl = 0;
374         s->hcyl = 0;
375     } else {
376         s->lcyl = 0xff;
377         s->hcyl = 0xff;
378     }
379 }
380 
381 typedef struct TrimAIOCB {
382     BlockAIOCB common;
383     BlockBackend *blk;
384     QEMUBH *bh;
385     int ret;
386     QEMUIOVector *qiov;
387     BlockAIOCB *aiocb;
388     int i, j;
389 } TrimAIOCB;
390 
391 static void trim_aio_cancel(BlockAIOCB *acb)
392 {
393     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
394 
395     /* Exit the loop so ide_issue_trim_cb will not continue */
396     iocb->j = iocb->qiov->niov - 1;
397     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
398 
399     iocb->ret = -ECANCELED;
400 
401     if (iocb->aiocb) {
402         blk_aio_cancel_async(iocb->aiocb);
403         iocb->aiocb = NULL;
404     }
405 }
406 
407 static const AIOCBInfo trim_aiocb_info = {
408     .aiocb_size         = sizeof(TrimAIOCB),
409     .cancel_async       = trim_aio_cancel,
410 };
411 
412 static void ide_trim_bh_cb(void *opaque)
413 {
414     TrimAIOCB *iocb = opaque;
415 
416     iocb->common.cb(iocb->common.opaque, iocb->ret);
417 
418     qemu_bh_delete(iocb->bh);
419     iocb->bh = NULL;
420     qemu_aio_unref(iocb);
421 }
422 
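/*
 * Process the guest's DSM TRIM payload one range entry at a time.  Each
 * 8-byte entry packs a 48-bit starting LBA and a 16-bit sector count;
 * iocb->j tracks the current iovec and iocb->i the current entry, and every
 * discard completion re-enters this callback to submit the next range.
 */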
423 static void ide_issue_trim_cb(void *opaque, int ret)
424 {
425     TrimAIOCB *iocb = opaque;
426     if (ret >= 0) {
427         while (iocb->j < iocb->qiov->niov) {
428             int j = iocb->j;
429             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
430                 int i = iocb->i;
431                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
432 
433                 /* 6-byte LBA + 2-byte range per entry */
434                 uint64_t entry = le64_to_cpu(buffer[i]);
435                 uint64_t sector = entry & 0x0000ffffffffffffULL;
436                 uint16_t count = entry >> 48;
437 
438                 if (count == 0) {
439                     continue;
440                 }
441 
442                 /* Got an entry! Submit and exit.  */
443                 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
444                                                sector << BDRV_SECTOR_BITS,
445                                                count << BDRV_SECTOR_BITS,
446                                                ide_issue_trim_cb, opaque);
447                 return;
448             }
449 
450             iocb->j++;
451             iocb->i = -1;
452         }
453     } else {
454         iocb->ret = ret;
455     }
456 
457     iocb->aiocb = NULL;
458     if (iocb->bh) {
459         qemu_bh_schedule(iocb->bh);
460     }
461 }
462 
463 BlockAIOCB *ide_issue_trim(
464         int64_t offset, QEMUIOVector *qiov,
465         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
466 {
467     BlockBackend *blk = opaque;
468     TrimAIOCB *iocb;
469 
470     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
471     iocb->blk = blk;
472     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
473     iocb->ret = 0;
474     iocb->qiov = qiov;
475     iocb->i = -1;
476     iocb->j = 0;
477     ide_issue_trim_cb(iocb, 0);
478     return &iocb->common;
479 }
480 
481 void ide_abort_command(IDEState *s)
482 {
483     ide_transfer_stop(s);
484     s->status = READY_STAT | ERR_STAT;
485     s->error = ABRT_ERR;
486 }
487 
488 static void ide_set_retry(IDEState *s)
489 {
490     s->bus->retry_unit = s->unit;
491     s->bus->retry_sector_num = ide_get_sector(s);
492     s->bus->retry_nsector = s->nsector;
493 }
494 
495 static void ide_clear_retry(IDEState *s)
496 {
497     s->bus->retry_unit = -1;
498     s->bus->retry_sector_num = 0;
499     s->bus->retry_nsector = 0;
500 }
501 
502 /* prepare data transfer and tell what to do after */
503 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
504                         EndTransferFunc *end_transfer_func)
505 {
506     s->end_transfer_func = end_transfer_func;
507     s->data_ptr = buf;
508     s->data_end = buf + size;
509     ide_set_retry(s);
510     if (!(s->status & ERR_STAT)) {
511         s->status |= DRQ_STAT;
512     }
513     if (s->bus->dma->ops->start_transfer) {
514         s->bus->dma->ops->start_transfer(s->bus->dma);
515     }
516 }
517 
518 static void ide_cmd_done(IDEState *s)
519 {
520     if (s->bus->dma->ops->cmd_done) {
521         s->bus->dma->ops->cmd_done(s->bus->dma);
522     }
523 }
524 
525 static void ide_transfer_halt(IDEState *s,
526                               void(*end_transfer_func)(IDEState *),
527                               bool notify)
528 {
529     s->end_transfer_func = end_transfer_func;
530     s->data_ptr = s->io_buffer;
531     s->data_end = s->io_buffer;
532     s->status &= ~DRQ_STAT;
533     if (notify) {
534         ide_cmd_done(s);
535     }
536 }
537 
538 void ide_transfer_stop(IDEState *s)
539 {
540     ide_transfer_halt(s, ide_transfer_stop, true);
541 }
542 
543 static void ide_transfer_cancel(IDEState *s)
544 {
545     ide_transfer_halt(s, ide_transfer_cancel, false);
546 }
547 
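/*
 * Assemble the current starting sector from the task-file registers:
 * LBA48 uses the HOB copies for the upper bytes, LBA28 packs the low
 * nibble of the select register above hcyl/lcyl/sector, and CHS mode
 * converts cylinder/head/sector geometry to a linear sector number.
 */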
548 int64_t ide_get_sector(IDEState *s)
549 {
550     int64_t sector_num;
551     if (s->select & 0x40) {
552         /* lba */
553 	if (!s->lba48) {
554 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
555 		(s->lcyl << 8) | s->sector;
556 	} else {
557 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
558 		((int64_t) s->hob_lcyl << 32) |
559 		((int64_t) s->hob_sector << 24) |
560 		((int64_t) s->hcyl << 16) |
561 		((int64_t) s->lcyl << 8) | s->sector;
562 	}
563     } else {
564         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
565             (s->select & 0x0f) * s->sectors + (s->sector - 1);
566     }
567     return sector_num;
568 }
569 
570 void ide_set_sector(IDEState *s, int64_t sector_num)
571 {
572     unsigned int cyl, r;
573     if (s->select & 0x40) {
574 	if (!s->lba48) {
575             s->select = (s->select & 0xf0) | (sector_num >> 24);
576             s->hcyl = (sector_num >> 16);
577             s->lcyl = (sector_num >> 8);
578             s->sector = (sector_num);
579 	} else {
580 	    s->sector = sector_num;
581 	    s->lcyl = sector_num >> 8;
582 	    s->hcyl = sector_num >> 16;
583 	    s->hob_sector = sector_num >> 24;
584 	    s->hob_lcyl = sector_num >> 32;
585 	    s->hob_hcyl = sector_num >> 40;
586 	}
587     } else {
588         cyl = sector_num / (s->heads * s->sectors);
589         r = sector_num % (s->heads * s->sectors);
590         s->hcyl = cyl >> 8;
591         s->lcyl = cyl;
592         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
593         s->sector = (r % s->sectors) + 1;
594     }
595 }
596 
597 static void ide_rw_error(IDEState *s) {
598     ide_abort_command(s);
599     ide_set_irq(s->bus);
600 }
601 
602 static bool ide_sect_range_ok(IDEState *s,
603                               uint64_t sector, uint64_t nb_sectors)
604 {
605     uint64_t total_sectors;
606 
607     blk_get_geometry(s->blk, &total_sectors);
608     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
609         return false;
610     }
611     return true;
612 }
613 
614 static void ide_buffered_readv_cb(void *opaque, int ret)
615 {
616     IDEBufferedRequest *req = opaque;
617     if (!req->orphaned) {
618         if (!ret) {
619             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
620                                 req->original_qiov->size);
621         }
622         req->original_cb(req->original_opaque, ret);
623     }
624     QLIST_REMOVE(req, list);
625     qemu_vfree(req->iov.iov_base);
626     g_free(req);
627 }
628 
629 #define MAX_BUFFERED_REQS 16
630 
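/*
 * Read into a private bounce buffer rather than directly into the caller's
 * iovec.  The data is copied back in ide_buffered_readv_cb() only if the
 * request has not been orphaned by ide_cancel_dma_sync(), so a cancelled
 * request completes without writing into the original buffer.
 */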
631 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
632                                QEMUIOVector *iov, int nb_sectors,
633                                BlockCompletionFunc *cb, void *opaque)
634 {
635     BlockAIOCB *aioreq;
636     IDEBufferedRequest *req;
637     int c = 0;
638 
639     QLIST_FOREACH(req, &s->buffered_requests, list) {
640         c++;
641     }
642     if (c > MAX_BUFFERED_REQS) {
643         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
644     }
645 
646     req = g_new0(IDEBufferedRequest, 1);
647     req->original_qiov = iov;
648     req->original_cb = cb;
649     req->original_opaque = opaque;
650     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
651     req->iov.iov_len = iov->size;
652     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
653 
654     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
655                             &req->qiov, 0, ide_buffered_readv_cb, req);
656 
657     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
658     return aioreq;
659 }
660 
661 /**
662  * Cancel all pending DMA requests.
663  * Any buffered DMA requests are instantly canceled,
664  * but any pending unbuffered DMA requests must be waited on.
665  */
666 void ide_cancel_dma_sync(IDEState *s)
667 {
668     IDEBufferedRequest *req;
669 
670     /* First invoke the callbacks of all buffered requests
671      * and flag those requests as orphaned. Ideally there are no
672      * unbuffered requests (scatter-gather DMA requests or write
673      * requests) pending, so we can avoid draining altogether. */
674     QLIST_FOREACH(req, &s->buffered_requests, list) {
675         if (!req->orphaned) {
676             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
677             req->original_cb(req->original_opaque, -ECANCELED);
678         }
679         req->orphaned = true;
680     }
681 
682     /*
683      * We can't cancel scatter-gather DMA in the middle of the
684      * operation, or a partial (not full) DMA transfer would reach
685      * the storage, so we wait for completion instead (we behave
686      * as if the DMA had already completed by the time the guest
687      * tries to cancel it via bmdma_cmd_writeb with BM_CMD_START
688      * not set).
689      *
690      * In the future we'll be able to safely cancel the I/O if the
691      * whole DMA operation is submitted to disk with a single
692      * aio operation using preadv/pwritev.
693      */
694     if (s->bus->dma->aiocb) {
695         trace_ide_cancel_dma_sync_remaining();
696         blk_drain(s->blk);
697         assert(s->bus->dma->aiocb == NULL);
698     }
699 }
700 
701 static void ide_sector_read(IDEState *s);
702 
703 static void ide_sector_read_cb(void *opaque, int ret)
704 {
705     IDEState *s = opaque;
706     int n;
707 
708     s->pio_aiocb = NULL;
709     s->status &= ~BUSY_STAT;
710 
711     if (ret == -ECANCELED) {
712         return;
713     }
714     if (ret != 0) {
715         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
716                                 IDE_RETRY_READ)) {
717             return;
718         }
719     }
720 
721     block_acct_done(blk_get_stats(s->blk), &s->acct);
722 
723     n = s->nsector;
724     if (n > s->req_nb_sectors) {
725         n = s->req_nb_sectors;
726     }
727 
728     ide_set_sector(s, ide_get_sector(s) + n);
729     s->nsector -= n;
730     /* Allow the guest to read the io_buffer */
731     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
732     ide_set_irq(s->bus);
733 }
734 
735 static void ide_sector_read(IDEState *s)
736 {
737     int64_t sector_num;
738     int n;
739 
740     s->status = READY_STAT | SEEK_STAT;
741     s->error = 0; /* not needed by IDE spec, but needed by Windows */
742     sector_num = ide_get_sector(s);
743     n = s->nsector;
744 
745     if (n == 0) {
746         ide_transfer_stop(s);
747         return;
748     }
749 
750     s->status |= BUSY_STAT;
751 
752     if (n > s->req_nb_sectors) {
753         n = s->req_nb_sectors;
754     }
755 
756     trace_ide_sector_read(sector_num, n);
757 
758     if (!ide_sect_range_ok(s, sector_num, n)) {
759         ide_rw_error(s);
760         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
761         return;
762     }
763 
764     s->iov.iov_base = s->io_buffer;
765     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
766     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
767 
768     block_acct_start(blk_get_stats(s->blk), &s->acct,
769                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
770     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
771                                       ide_sector_read_cb, s);
772 }
773 
774 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
775 {
776     if (s->bus->dma->ops->commit_buf) {
777         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
778     }
779     s->io_buffer_offset += tx_bytes;
780     qemu_sglist_destroy(&s->sg);
781 }
782 
783 void ide_set_inactive(IDEState *s, bool more)
784 {
785     s->bus->dma->aiocb = NULL;
786     ide_clear_retry(s);
787     if (s->bus->dma->ops->set_inactive) {
788         s->bus->dma->ops->set_inactive(s->bus->dma, more);
789     }
790     ide_cmd_done(s);
791 }
792 
793 void ide_dma_error(IDEState *s)
794 {
795     dma_buf_commit(s, 0);
796     ide_abort_command(s);
797     ide_set_inactive(s, false);
798     ide_set_irq(s->bus);
799 }
800 
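/*
 * Apply the configured block error action to a failed request.  Returns
 * non-zero if the request was stopped or reported to the guest (the caller
 * must not complete the transfer), and zero only for the IGNORE action,
 * in which case the caller carries on with the transfer.
 */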
801 int ide_handle_rw_error(IDEState *s, int error, int op)
802 {
803     bool is_read = (op & IDE_RETRY_READ) != 0;
804     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
805 
806     if (action == BLOCK_ERROR_ACTION_STOP) {
807         assert(s->bus->retry_unit == s->unit);
808         s->bus->error_status = op;
809     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
810         block_acct_failed(blk_get_stats(s->blk), &s->acct);
811         if (IS_IDE_RETRY_DMA(op)) {
812             ide_dma_error(s);
813         } else if (IS_IDE_RETRY_ATAPI(op)) {
814             ide_atapi_io_error(s, -error);
815         } else {
816             ide_rw_error(s);
817         }
818     }
819     blk_error_action(s->blk, action, is_read, error);
820     return action != BLOCK_ERROR_ACTION_IGNORE;
821 }
822 
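/*
 * DMA completion callback, re-entered once per prepared chunk: commit the
 * scatter/gather transfer that just finished, advance the sector and count
 * registers, then either complete the command (nsector == 0) or map the
 * next PRD entries via prepare_buf() and issue the next read/write/trim.
 */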
823 static void ide_dma_cb(void *opaque, int ret)
824 {
825     IDEState *s = opaque;
826     int n;
827     int64_t sector_num;
828     uint64_t offset;
829     bool stay_active = false;
830 
831     if (ret == -ECANCELED) {
832         return;
833     }
834     if (ret < 0) {
835         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
836             s->bus->dma->aiocb = NULL;
837             dma_buf_commit(s, 0);
838             return;
839         }
840     }
841 
842     n = s->io_buffer_size >> 9;
843     if (n > s->nsector) {
844         /* The PRDs were longer than needed for this request. Shorten them so
845          * we don't get a negative remainder. The Active bit must remain set
846          * after the request completes. */
847         n = s->nsector;
848         stay_active = true;
849     }
850 
851     sector_num = ide_get_sector(s);
852     if (n > 0) {
853         assert(n * 512 == s->sg.size);
854         dma_buf_commit(s, s->sg.size);
855         sector_num += n;
856         ide_set_sector(s, sector_num);
857         s->nsector -= n;
858     }
859 
860     /* end of transfer ? */
861     if (s->nsector == 0) {
862         s->status = READY_STAT | SEEK_STAT;
863         ide_set_irq(s->bus);
864         goto eot;
865     }
866 
867     /* launch next transfer */
868     n = s->nsector;
869     s->io_buffer_index = 0;
870     s->io_buffer_size = n * 512;
871     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
872         /* The PRDs were too short. Reset the Active bit, but don't raise an
873          * interrupt. */
874         s->status = READY_STAT | SEEK_STAT;
875         dma_buf_commit(s, 0);
876         goto eot;
877     }
878 
879     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
880 
881     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
882         !ide_sect_range_ok(s, sector_num, n)) {
883         ide_dma_error(s);
884         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
885         return;
886     }
887 
888     offset = sector_num << BDRV_SECTOR_BITS;
889     switch (s->dma_cmd) {
890     case IDE_DMA_READ:
891         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
892                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
893         break;
894     case IDE_DMA_WRITE:
895         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
896                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
897         break;
898     case IDE_DMA_TRIM:
899         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
900                                         &s->sg, offset, BDRV_SECTOR_SIZE,
901                                         ide_issue_trim, s->blk, ide_dma_cb, s,
902                                         DMA_DIRECTION_TO_DEVICE);
903         break;
904     default:
905         abort();
906     }
907     return;
908 
909 eot:
910     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
911         block_acct_done(blk_get_stats(s->blk), &s->acct);
912     }
913     ide_set_inactive(s, stay_active);
914 }
915 
916 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
917 {
918     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
919     s->io_buffer_size = 0;
920     s->dma_cmd = dma_cmd;
921 
922     switch (dma_cmd) {
923     case IDE_DMA_READ:
924         block_acct_start(blk_get_stats(s->blk), &s->acct,
925                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
926         break;
927     case IDE_DMA_WRITE:
928         block_acct_start(blk_get_stats(s->blk), &s->acct,
929                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
930         break;
931     default:
932         break;
933     }
934 
935     ide_start_dma(s, ide_dma_cb);
936 }
937 
938 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
939 {
940     s->io_buffer_index = 0;
941     ide_set_retry(s);
942     if (s->bus->dma->ops->start_dma) {
943         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
944     }
945 }
946 
947 static void ide_sector_write(IDEState *s);
948 
949 static void ide_sector_write_timer_cb(void *opaque)
950 {
951     IDEState *s = opaque;
952     ide_set_irq(s->bus);
953 }
954 
955 static void ide_sector_write_cb(void *opaque, int ret)
956 {
957     IDEState *s = opaque;
958     int n;
959 
960     if (ret == -ECANCELED) {
961         return;
962     }
963 
964     s->pio_aiocb = NULL;
965     s->status &= ~BUSY_STAT;
966 
967     if (ret != 0) {
968         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
969             return;
970         }
971     }
972 
973     block_acct_done(blk_get_stats(s->blk), &s->acct);
974 
975     n = s->nsector;
976     if (n > s->req_nb_sectors) {
977         n = s->req_nb_sectors;
978     }
979     s->nsector -= n;
980 
981     ide_set_sector(s, ide_get_sector(s) + n);
982     if (s->nsector == 0) {
983         /* no more sectors to write */
984         ide_transfer_stop(s);
985     } else {
986         int n1 = s->nsector;
987         if (n1 > s->req_nb_sectors) {
988             n1 = s->req_nb_sectors;
989         }
990         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
991                            ide_sector_write);
992     }
993 
994     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
995         /* It seems there is a bug in the Windows 2000 installer HDD
996            IDE driver which fills the disk with empty logs when the
997            IDE write IRQ comes too early. This hack tries to correct
998            that at the expense of slower write performance. Use this
999            option _only_ to install Windows 2000. You must disable it
1000            for normal use. */
1001         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1002                   (NANOSECONDS_PER_SECOND / 1000));
1003     } else {
1004         ide_set_irq(s->bus);
1005     }
1006 }
1007 
1008 static void ide_sector_write(IDEState *s)
1009 {
1010     int64_t sector_num;
1011     int n;
1012 
1013     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1014     sector_num = ide_get_sector(s);
1015 
1016     n = s->nsector;
1017     if (n > s->req_nb_sectors) {
1018         n = s->req_nb_sectors;
1019     }
1020 
1021     trace_ide_sector_write(sector_num, n);
1022 
1023     if (!ide_sect_range_ok(s, sector_num, n)) {
1024         ide_rw_error(s);
1025         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1026         return;
1027     }
1028 
1029     s->iov.iov_base = s->io_buffer;
1030     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1031     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1032 
1033     block_acct_start(blk_get_stats(s->blk), &s->acct,
1034                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1035     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1036                                    &s->qiov, 0, ide_sector_write_cb, s);
1037 }
1038 
1039 static void ide_flush_cb(void *opaque, int ret)
1040 {
1041     IDEState *s = opaque;
1042 
1043     s->pio_aiocb = NULL;
1044 
1045     if (ret == -ECANCELED) {
1046         return;
1047     }
1048     if (ret < 0) {
1049         /* XXX: What sector number to set here? */
1050         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1051             return;
1052         }
1053     }
1054 
1055     if (s->blk) {
1056         block_acct_done(blk_get_stats(s->blk), &s->acct);
1057     }
1058     s->status = READY_STAT | SEEK_STAT;
1059     ide_cmd_done(s);
1060     ide_set_irq(s->bus);
1061 }
1062 
1063 static void ide_flush_cache(IDEState *s)
1064 {
1065     if (s->blk == NULL) {
1066         ide_flush_cb(s, 0);
1067         return;
1068     }
1069 
1070     s->status |= BUSY_STAT;
1071     ide_set_retry(s);
1072     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1073 
1074     if (blk_bs(s->blk)) {
1075         s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1076     } else {
1077         /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1078          * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1079          */
1080         ide_flush_cb(s, 0);
1081     }
1082 }
1083 
1084 static void ide_cfata_metadata_inquiry(IDEState *s)
1085 {
1086     uint16_t *p;
1087     uint32_t spd;
1088 
1089     p = (uint16_t *) s->io_buffer;
1090     memset(p, 0, 0x200);
1091     spd = ((s->mdata_size - 1) >> 9) + 1;
1092 
1093     put_le16(p + 0, 0x0001);			/* Data format revision */
1094     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1095     put_le16(p + 2, s->media_changed);		/* Media status */
1096     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1097     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1098     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1099     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1100 }
1101 
1102 static void ide_cfata_metadata_read(IDEState *s)
1103 {
1104     uint16_t *p;
1105 
1106     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1107         s->status = ERR_STAT;
1108         s->error = ABRT_ERR;
1109         return;
1110     }
1111 
1112     p = (uint16_t *) s->io_buffer;
1113     memset(p, 0, 0x200);
1114 
1115     put_le16(p + 0, s->media_changed);		/* Media status */
1116     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1117                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1118                                     s->nsector << 9), 0x200 - 2));
1119 }
1120 
1121 static void ide_cfata_metadata_write(IDEState *s)
1122 {
1123     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1124         s->status = ERR_STAT;
1125         s->error = ABRT_ERR;
1126         return;
1127     }
1128 
1129     s->media_changed = 0;
1130 
1131     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1132                     s->io_buffer + 2,
1133                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1134                                     s->nsector << 9), 0x200 - 2));
1135 }
1136 
1137 /* called when the inserted state of the media has changed */
1138 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1139 {
1140     IDEState *s = opaque;
1141     uint64_t nb_sectors;
1142 
1143     s->tray_open = !load;
1144     blk_get_geometry(s->blk, &nb_sectors);
1145     s->nb_sectors = nb_sectors;
1146 
1147     /*
1148      * First indicate to the guest that a CD has been removed.  That's
1149      * done on the next command the guest sends us.
1150      *
1151      * Then we set UNIT_ATTENTION, by which the guest will
1152      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1153      */
1154     s->cdrom_changed = 1;
1155     s->events.new_media = true;
1156     s->events.eject_request = false;
1157     ide_set_irq(s->bus);
1158 }
1159 
1160 static void ide_cd_eject_request_cb(void *opaque, bool force)
1161 {
1162     IDEState *s = opaque;
1163 
1164     s->events.eject_request = true;
1165     if (force) {
1166         s->tray_locked = false;
1167     }
1168     ide_set_irq(s->bus);
1169 }
1170 
1171 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1172 {
1173     s->lba48 = lba48;
1174 
1175     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1176      * fiddling with the rest of the read logic, we just store the
1177      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1178      */
1179     if (!s->lba48) {
1180 	if (!s->nsector)
1181 	    s->nsector = 256;
1182     } else {
1183 	if (!s->nsector && !s->hob_nsector)
1184 	    s->nsector = 65536;
1185 	else {
1186 	    int lo = s->nsector;
1187 	    int hi = s->hob_nsector;
1188 
1189 	    s->nsector = (hi << 8) | lo;
1190 	}
1191     }
1192 }
1193 
1194 static void ide_clear_hob(IDEBus *bus)
1195 {
1196     /* any write clears HOB high bit of device control register */
1197     bus->ifs[0].select &= ~(1 << 7);
1198     bus->ifs[1].select &= ~(1 << 7);
1199 }
1200 
1201 /* IOport [W]rite [R]egisters */
1202 enum ATA_IOPORT_WR {
1203     ATA_IOPORT_WR_DATA = 0,
1204     ATA_IOPORT_WR_FEATURES = 1,
1205     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1206     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1207     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1208     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1209     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1210     ATA_IOPORT_WR_COMMAND = 7,
1211     ATA_IOPORT_WR_NUM_REGISTERS,
1212 };
1213 
1214 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1215     [ATA_IOPORT_WR_DATA] = "Data",
1216     [ATA_IOPORT_WR_FEATURES] = "Features",
1217     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1218     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1219     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1220     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1221     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1222     [ATA_IOPORT_WR_COMMAND] = "Command"
1223 };
1224 
1225 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1226 {
1227     IDEBus *bus = opaque;
1228     IDEState *s = idebus_active_if(bus);
1229     int reg_num = addr & 7;
1230 
1231     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1232 
1233     /* ignore writes to command block while busy with previous command */
1234     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1235         return;
1236     }
1237 
1238     switch (reg_num) {
1239     case 0:
1240         break;
1241     case ATA_IOPORT_WR_FEATURES:
1242         ide_clear_hob(bus);
1243         /* NOTE: data is written to the two drives */
1244         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1245         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1246         bus->ifs[0].feature = val;
1247         bus->ifs[1].feature = val;
1248         break;
1249     case ATA_IOPORT_WR_SECTOR_COUNT:
1250 	ide_clear_hob(bus);
1251 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1252 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1253         bus->ifs[0].nsector = val;
1254         bus->ifs[1].nsector = val;
1255         break;
1256     case ATA_IOPORT_WR_SECTOR_NUMBER:
1257 	ide_clear_hob(bus);
1258 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1259 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1260         bus->ifs[0].sector = val;
1261         bus->ifs[1].sector = val;
1262         break;
1263     case ATA_IOPORT_WR_CYLINDER_LOW:
1264 	ide_clear_hob(bus);
1265 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1266 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1267         bus->ifs[0].lcyl = val;
1268         bus->ifs[1].lcyl = val;
1269         break;
1270     case ATA_IOPORT_WR_CYLINDER_HIGH:
1271 	ide_clear_hob(bus);
1272 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1273 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1274         bus->ifs[0].hcyl = val;
1275         bus->ifs[1].hcyl = val;
1276         break;
1277     case ATA_IOPORT_WR_DEVICE_HEAD:
1278 	/* FIXME: HOB readback uses bit 7 */
1279         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1280         bus->ifs[1].select = (val | 0x10) | 0xa0;
1281         /* select drive */
1282         bus->unit = (val >> 4) & 1;
1283         break;
1284     default:
1285     case ATA_IOPORT_WR_COMMAND:
1286         /* command */
1287         ide_exec_cmd(bus, val);
1288         break;
1289     }
1290 }
1291 
1292 static void ide_reset(IDEState *s)
1293 {
1294     trace_ide_reset(s);
1295 
1296     if (s->pio_aiocb) {
1297         blk_aio_cancel(s->pio_aiocb);
1298         s->pio_aiocb = NULL;
1299     }
1300 
1301     if (s->drive_kind == IDE_CFATA)
1302         s->mult_sectors = 0;
1303     else
1304         s->mult_sectors = MAX_MULT_SECTORS;
1305     /* ide regs */
1306     s->feature = 0;
1307     s->error = 0;
1308     s->nsector = 0;
1309     s->sector = 0;
1310     s->lcyl = 0;
1311     s->hcyl = 0;
1312 
1313     /* lba48 */
1314     s->hob_feature = 0;
1315     s->hob_sector = 0;
1316     s->hob_nsector = 0;
1317     s->hob_lcyl = 0;
1318     s->hob_hcyl = 0;
1319 
1320     s->select = 0xa0;
1321     s->status = READY_STAT | SEEK_STAT;
1322 
1323     s->lba48 = 0;
1324 
1325     /* ATAPI specific */
1326     s->sense_key = 0;
1327     s->asc = 0;
1328     s->cdrom_changed = 0;
1329     s->packet_transfer_size = 0;
1330     s->elementary_transfer_size = 0;
1331     s->io_buffer_index = 0;
1332     s->cd_sector_size = 0;
1333     s->atapi_dma = 0;
1334     s->tray_locked = 0;
1335     s->tray_open = 0;
1336     /* ATA DMA state */
1337     s->io_buffer_size = 0;
1338     s->req_nb_sectors = 0;
1339 
1340     ide_set_signature(s);
1341     /* init the transfer handler so that 0xffff is returned on data
1342        accesses */
1343     s->end_transfer_func = ide_dummy_transfer_stop;
1344     ide_dummy_transfer_stop(s);
1345     s->media_changed = 0;
1346 }
1347 
1348 static bool cmd_nop(IDEState *s, uint8_t cmd)
1349 {
1350     return true;
1351 }
1352 
1353 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1354 {
1355     /* Halt PIO (in the DRQ phase), then DMA */
1356     ide_transfer_cancel(s);
1357     ide_cancel_dma_sync(s);
1358 
1359     /* Reset any PIO commands, reset signature, etc */
1360     ide_reset(s);
1361 
1362     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1363      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1364     s->status = 0x00;
1365 
1366     /* Do not overwrite status register */
1367     return false;
1368 }
1369 
1370 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1371 {
1372     switch (s->feature) {
1373     case DSM_TRIM:
1374         if (s->blk) {
1375             ide_sector_start_dma(s, IDE_DMA_TRIM);
1376             return false;
1377         }
1378         break;
1379     }
1380 
1381     ide_abort_command(s);
1382     return true;
1383 }
1384 
1385 static bool cmd_identify(IDEState *s, uint8_t cmd)
1386 {
1387     if (s->blk && s->drive_kind != IDE_CD) {
1388         if (s->drive_kind != IDE_CFATA) {
1389             ide_identify(s);
1390         } else {
1391             ide_cfata_identify(s);
1392         }
1393         s->status = READY_STAT | SEEK_STAT;
1394         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1395         ide_set_irq(s->bus);
1396         return false;
1397     } else {
1398         if (s->drive_kind == IDE_CD) {
1399             ide_set_signature(s);
1400         }
1401         ide_abort_command(s);
1402     }
1403 
1404     return true;
1405 }
1406 
1407 static bool cmd_verify(IDEState *s, uint8_t cmd)
1408 {
1409     bool lba48 = (cmd == WIN_VERIFY_EXT);
1410 
1411     /* do sector number check ? */
1412     ide_cmd_lba48_transform(s, lba48);
1413 
1414     return true;
1415 }
1416 
1417 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1418 {
1419     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1420         /* Disable Read and Write Multiple */
1421         s->mult_sectors = 0;
1422     } else if ((s->nsector & 0xff) != 0 &&
1423         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1424          (s->nsector & (s->nsector - 1)) != 0)) {
1425         ide_abort_command(s);
1426     } else {
1427         s->mult_sectors = s->nsector & 0xff;
1428     }
1429 
1430     return true;
1431 }
1432 
1433 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1434 {
1435     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1436 
1437     if (!s->blk || !s->mult_sectors) {
1438         ide_abort_command(s);
1439         return true;
1440     }
1441 
1442     ide_cmd_lba48_transform(s, lba48);
1443     s->req_nb_sectors = s->mult_sectors;
1444     ide_sector_read(s);
1445     return false;
1446 }
1447 
1448 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1449 {
1450     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1451     int n;
1452 
1453     if (!s->blk || !s->mult_sectors) {
1454         ide_abort_command(s);
1455         return true;
1456     }
1457 
1458     ide_cmd_lba48_transform(s, lba48);
1459 
1460     s->req_nb_sectors = s->mult_sectors;
1461     n = MIN(s->nsector, s->req_nb_sectors);
1462 
1463     s->status = SEEK_STAT | READY_STAT;
1464     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1465 
1466     s->media_changed = 1;
1467 
1468     return false;
1469 }
1470 
1471 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1472 {
1473     bool lba48 = (cmd == WIN_READ_EXT);
1474 
1475     if (s->drive_kind == IDE_CD) {
1476         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1477         ide_abort_command(s);
1478         return true;
1479     }
1480 
1481     if (!s->blk) {
1482         ide_abort_command(s);
1483         return true;
1484     }
1485 
1486     ide_cmd_lba48_transform(s, lba48);
1487     s->req_nb_sectors = 1;
1488     ide_sector_read(s);
1489 
1490     return false;
1491 }
1492 
1493 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1494 {
1495     bool lba48 = (cmd == WIN_WRITE_EXT);
1496 
1497     if (!s->blk) {
1498         ide_abort_command(s);
1499         return true;
1500     }
1501 
1502     ide_cmd_lba48_transform(s, lba48);
1503 
1504     s->req_nb_sectors = 1;
1505     s->status = SEEK_STAT | READY_STAT;
1506     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1507 
1508     s->media_changed = 1;
1509 
1510     return false;
1511 }
1512 
1513 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1514 {
1515     bool lba48 = (cmd == WIN_READDMA_EXT);
1516 
1517     if (!s->blk) {
1518         ide_abort_command(s);
1519         return true;
1520     }
1521 
1522     ide_cmd_lba48_transform(s, lba48);
1523     ide_sector_start_dma(s, IDE_DMA_READ);
1524 
1525     return false;
1526 }
1527 
1528 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1529 {
1530     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1531 
1532     if (!s->blk) {
1533         ide_abort_command(s);
1534         return true;
1535     }
1536 
1537     ide_cmd_lba48_transform(s, lba48);
1538     ide_sector_start_dma(s, IDE_DMA_WRITE);
1539 
1540     s->media_changed = 1;
1541 
1542     return false;
1543 }
1544 
1545 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1546 {
1547     ide_flush_cache(s);
1548     return false;
1549 }
1550 
1551 static bool cmd_seek(IDEState *s, uint8_t cmd)
1552 {
1553     /* XXX: Check that seek is within bounds */
1554     return true;
1555 }
1556 
1557 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1558 {
1559     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1560 
1561     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1562     if (s->nb_sectors == 0) {
1563         ide_abort_command(s);
1564         return true;
1565     }
1566 
1567     ide_cmd_lba48_transform(s, lba48);
1568     ide_set_sector(s, s->nb_sectors - 1);
1569 
1570     return true;
1571 }
1572 
1573 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1574 {
1575     s->nsector = 0xff; /* device active or idle */
1576     return true;
1577 }
1578 
1579 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1580 {
1581     uint16_t *identify_data;
1582 
1583     if (!s->blk) {
1584         ide_abort_command(s);
1585         return true;
1586     }
1587 
1588     /* XXX: valid for CDROM ? */
1589     switch (s->feature) {
1590     case 0x02: /* write cache enable */
1591         blk_set_enable_write_cache(s->blk, true);
1592         identify_data = (uint16_t *)s->identify_data;
1593         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1594         return true;
1595     case 0x82: /* write cache disable */
1596         blk_set_enable_write_cache(s->blk, false);
1597         identify_data = (uint16_t *)s->identify_data;
1598         put_le16(identify_data + 85, (1 << 14) | 1);
1599         ide_flush_cache(s);
1600         return false;
1601     case 0xcc: /* reverting to power-on defaults enable */
1602     case 0x66: /* reverting to power-on defaults disable */
1603     case 0xaa: /* read look-ahead enable */
1604     case 0x55: /* read look-ahead disable */
1605     case 0x05: /* set advanced power management mode */
1606     case 0x85: /* disable advanced power management mode */
1607     case 0x69: /* NOP */
1608     case 0x67: /* NOP */
1609     case 0x96: /* NOP */
1610     case 0x9a: /* NOP */
1611     case 0x42: /* enable Automatic Acoustic Mode */
1612     case 0xc2: /* disable Automatic Acoustic Mode */
1613         return true;
1614     case 0x03: /* set transfer mode */
1615         {
1616             uint8_t val = s->nsector & 0x07;
1617             identify_data = (uint16_t *)s->identify_data;
1618 
1619             switch (s->nsector >> 3) {
1620             case 0x00: /* pio default */
1621             case 0x01: /* pio mode */
1622                 put_le16(identify_data + 62, 0x07);
1623                 put_le16(identify_data + 63, 0x07);
1624                 put_le16(identify_data + 88, 0x3f);
1625                 break;
1626             case 0x02: /* single word dma mode */
1627                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1628                 put_le16(identify_data + 63, 0x07);
1629                 put_le16(identify_data + 88, 0x3f);
1630                 break;
1631             case 0x04: /* mdma mode */
1632                 put_le16(identify_data + 62, 0x07);
1633                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1634                 put_le16(identify_data + 88, 0x3f);
1635                 break;
1636             case 0x08: /* udma mode */
1637                 put_le16(identify_data + 62, 0x07);
1638                 put_le16(identify_data + 63, 0x07);
1639                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1640                 break;
1641             default:
1642                 goto abort_cmd;
1643             }
1644             return true;
1645         }
1646     }
1647 
1648 abort_cmd:
1649     ide_abort_command(s);
1650     return true;
1651 }
1652 
1653 
1654 /*** ATAPI commands ***/
1655 
1656 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1657 {
1658     ide_atapi_identify(s);
1659     s->status = READY_STAT | SEEK_STAT;
1660     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1661     ide_set_irq(s->bus);
1662     return false;
1663 }
1664 
1665 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1666 {
1667     ide_set_signature(s);
1668 
1669     if (s->drive_kind == IDE_CD) {
1670         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1671                         * devices to return a clear status register
1672                         * with READY_STAT *not* set. */
1673         s->error = 0x01;
1674     } else {
1675         s->status = READY_STAT | SEEK_STAT;
1676         /* The bits of the error register do not have their usual meaning for
1677          * this command! They are part of the regular output (this is why
1678          * ERR_STAT isn't set): Device 0 passed, Device 1 passed or not present. */
1679         s->error = 0x01;
1680         ide_set_irq(s->bus);
1681     }
1682 
1683     return false;
1684 }
1685 
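/*
 * PACKET command: the guest first transfers the 12-byte command packet by
 * PIO; ide_atapi_cmd() runs once that transfer completes.  Bit 0 of the
 * Features register selects DMA for the data phase.
 */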
1686 static bool cmd_packet(IDEState *s, uint8_t cmd)
1687 {
1688     /* overlapping commands not supported */
1689     if (s->feature & 0x02) {
1690         ide_abort_command(s);
1691         return true;
1692     }
1693 
1694     s->status = READY_STAT | SEEK_STAT;
1695     s->atapi_dma = s->feature & 1;
1696     if (s->atapi_dma) {
1697         s->dma_cmd = IDE_DMA_ATAPI;
1698     }
1699     s->nsector = 1;
1700     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1701                        ide_atapi_cmd);
1702     return false;
1703 }
1704 
1705 
1706 /*** CF-ATA commands ***/
1707 
1708 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1709 {
1710     s->error = 0x09;    /* miscellaneous error */
1711     s->status = READY_STAT | SEEK_STAT;
1712     ide_set_irq(s->bus);
1713 
1714     return false;
1715 }
1716 
1717 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1718 {
1719     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1720      * required for Windows 8 to work with AHCI */
1721 
1722     if (cmd == CFA_WEAR_LEVEL) {
1723         s->nsector = 0;
1724     }
1725 
1726     if (cmd == CFA_ERASE_SECTORS) {
1727         s->media_changed = 1;
1728     }
1729 
1730     return true;
1731 }
1732 
1733 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1734 {
1735     s->status = READY_STAT | SEEK_STAT;
1736 
1737     memset(s->io_buffer, 0, 0x200);
1738     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1739     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1740     s->io_buffer[0x02] = s->select;                 /* Head */
1741     s->io_buffer[0x03] = s->sector;                 /* Sector */
1742     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1743     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1744     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1745     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1746     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1747     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1748     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1749 
1750     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1751     ide_set_irq(s->bus);
1752 
1753     return false;
1754 }
1755 
1756 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1757 {
1758     switch (s->feature) {
1759     case 0x02:  /* Inquiry Metadata Storage */
1760         ide_cfata_metadata_inquiry(s);
1761         break;
1762     case 0x03:  /* Read Metadata Storage */
1763         ide_cfata_metadata_read(s);
1764         break;
1765     case 0x04:  /* Write Metadata Storage */
1766         ide_cfata_metadata_write(s);
1767         break;
1768     default:
1769         ide_abort_command(s);
1770         return true;
1771     }
1772 
1773     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1774     s->status = 0x00; /* NOTE: READY is _not_ set */
1775     ide_set_irq(s->bus);
1776 
1777     return false;
1778 }
1779 
1780 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1781 {
1782     switch (s->feature) {
1783     case 0x01:  /* sense temperature in device */
1784         s->nsector = 0x50;      /* +20 C */
1785         break;
1786     default:
1787         ide_abort_command(s);
1788         return true;
1789     }
1790 
1791     return true;
1792 }
1793 
1794 
1795 /*** SMART commands ***/
1796 
1797 static bool cmd_smart(IDEState *s, uint8_t cmd)
1798 {
1799     int n;
1800 
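         /* SMART subcommands are only valid with the ATA key value 0x4f/0xc2
          * in LCYL/HCYL; anything else is aborted. */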
1801     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1802         goto abort_cmd;
1803     }
1804 
1805     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1806         goto abort_cmd;
1807     }
1808 
1809     switch (s->feature) {
1810     case SMART_DISABLE:
1811         s->smart_enabled = 0;
1812         return true;
1813 
1814     case SMART_ENABLE:
1815         s->smart_enabled = 1;
1816         return true;
1817 
1818     case SMART_ATTR_AUTOSAVE:
1819         switch (s->sector) {
1820         case 0x00:
1821             s->smart_autosave = 0;
1822             break;
1823         case 0xf1:
1824             s->smart_autosave = 1;
1825             break;
1826         default:
1827             goto abort_cmd;
1828         }
1829         return true;
1830 
1831     case SMART_STATUS:
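             /* Standard SMART status reply: LCYL/HCYL = 0x4f/0xc2 reports a
              * healthy drive, the inverted pair 0xf4/0x2c reports that a
              * threshold has been exceeded. */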
1832         if (!s->smart_errors) {
1833             s->hcyl = 0xc2;
1834             s->lcyl = 0x4f;
1835         } else {
1836             s->hcyl = 0x2c;
1837             s->lcyl = 0xf4;
1838         }
1839         return true;
1840 
1841     case SMART_READ_THRESH:
1842         memset(s->io_buffer, 0, 0x200);
1843         s->io_buffer[0] = 0x01; /* smart struct version */
1844 
1845         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1846             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1847             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1848         }
1849 
1850         /* checksum: set byte 511 so the 512-byte structure sums to zero mod 256 */
1851         for (n = 0; n < 511; n++) {
1852             s->io_buffer[511] += s->io_buffer[n];
1853         }
1854         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1855 
1856         s->status = READY_STAT | SEEK_STAT;
1857         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1858         ide_set_irq(s->bus);
1859         return false;
1860 
1861     case SMART_READ_DATA:
1862         memset(s->io_buffer, 0, 0x200);
1863         s->io_buffer[0] = 0x01; /* smart struct version */
1864 
1865         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1866             int i;
1867             for (i = 0; i < 11; i++) {
1868                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1869             }
1870         }
1871 
1872         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1873         if (s->smart_selftest_count == 0) {
1874             s->io_buffer[363] = 0;
1875         } else {
1876             s->io_buffer[363] =
1877                 s->smart_selftest_data[3 +
1878                            (s->smart_selftest_count - 1) *
1879                            24];
1880         }
1881         s->io_buffer[364] = 0x20;
1882         s->io_buffer[365] = 0x01;
1883         /* offline data collection capability: execute + self-test */
1884         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1885         s->io_buffer[368] = 0x03; /* smart capability (1) */
1886         s->io_buffer[369] = 0x00; /* smart capability (2) */
1887         s->io_buffer[370] = 0x01; /* error logging supported */
1888         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1889         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1890         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1891 
1892         for (n = 0; n < 511; n++) {
1893             s->io_buffer[511] += s->io_buffer[n];
1894         }
1895         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1896 
1897         s->status = READY_STAT | SEEK_STAT;
1898         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1899         ide_set_irq(s->bus);
1900         return false;
1901 
1902     case SMART_READ_LOG:
1903         switch (s->sector) {
1904         case 0x01: /* summary smart error log */
1905             memset(s->io_buffer, 0, 0x200);
1906             s->io_buffer[0] = 0x01;
1907             s->io_buffer[1] = 0x00; /* no error entries */
1908             s->io_buffer[452] = s->smart_errors & 0xff;
1909             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1910 
1911             for (n = 0; n < 511; n++) {
1912                 s->io_buffer[511] += s->io_buffer[n];
1913             }
1914             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1915             break;
1916         case 0x06: /* smart self test log */
1917             memset(s->io_buffer, 0, 0x200);
1918             s->io_buffer[0] = 0x01;
1919             if (s->smart_selftest_count == 0) {
1920                 s->io_buffer[508] = 0;
1921             } else {
1922                 s->io_buffer[508] = s->smart_selftest_count;
1923                 for (n = 2; n < 506; n++)  {
1924                     s->io_buffer[n] = s->smart_selftest_data[n];
1925                 }
1926             }
1927 
1928             for (n = 0; n < 511; n++) {
1929                 s->io_buffer[511] += s->io_buffer[n];
1930             }
1931             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1932             break;
1933         default:
1934             goto abort_cmd;
1935         }
1936         s->status = READY_STAT | SEEK_STAT;
1937         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1938         ide_set_irq(s->bus);
1939         return false;
1940 
1941     case SMART_EXECUTE_OFFLINE:
1942         switch (s->sector) {
1943         case 0: /* off-line routine */
1944         case 1: /* short self test */
1945         case 2: /* extended self test */
1946             s->smart_selftest_count++;
1947             if (s->smart_selftest_count > 21) {
1948                 s->smart_selftest_count = 1;
1949             }
1950             n = 2 + (s->smart_selftest_count - 1) * 24;
1951             s->smart_selftest_data[n] = s->sector;
1952             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1953             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1954             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1955             break;
1956         default:
1957             goto abort_cmd;
1958         }
1959         return true;
1960     }
1961 
1962 abort_cmd:
1963     ide_abort_command(s);
1964     return true;
1965 }
1966 
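     /* Bitmasks of drive kinds (IDE_HD, IDE_CD, IDE_CFATA) that accept a
      * command; used in the flags field of ide_cmd_table below. */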
1967 #define HD_OK (1u << IDE_HD)
1968 #define CD_OK (1u << IDE_CD)
1969 #define CFA_OK (1u << IDE_CFATA)
1970 #define HD_CFA_OK (HD_OK | CFA_OK)
1971 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1972 
1973 /* Set the Disk Seek Completed status bit during completion */
1974 #define SET_DSC (1u << 8)
1975 
1976 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1977 static const struct {
1978     /* Returns true if the completion code should be run */
1979     bool (*handler)(IDEState *s, uint8_t cmd);
1980     int flags;
1981 } ide_cmd_table[0x100] = {
1982     /* NOP not implemented, mandatory for CD */
1983     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1984     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1985     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1986     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1987     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1988     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1989     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1990     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1991     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1992     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1993     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1994     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1995     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1996     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1997     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1998     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
1999     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2000     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2001     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2002     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2003     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2004     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2005     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2006     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2007     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2008     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2009     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2010     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2011     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2012     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2013     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2014     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2015     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2016     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2017     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2018     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2019     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2020     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2021     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2022     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2023     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2024     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2025     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2026     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2027     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2028     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2029     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2030     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2031     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2032     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2033     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2034     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2035     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2036     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2037     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2038     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2039 };
2040 
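     /* Commands outside the table, or whose flags lack the bit for the active
      * drive kind, are rejected here and aborted by ide_exec_cmd(). */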
2041 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2042 {
2043     return cmd < ARRAY_SIZE(ide_cmd_table)
2044         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2045 }
2046 
2047 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2048 {
2049     IDEState *s;
2050     bool complete;
2051 
2052     s = idebus_active_if(bus);
2053     trace_ide_exec_cmd(bus, s, val);
2054 
2055     /* ignore commands to non-existent slave */
2056     if (s != bus->ifs && !s->blk) {
2057         return;
2058     }
2059 
2060     /* Only RESET is allowed while BSY and/or DRQ are set,
2061      * and only to ATAPI devices. */
2062     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2063         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2064             return;
2065         }
2066     }
2067 
2068     if (!ide_cmd_permitted(s, val)) {
2069         ide_abort_command(s);
2070         ide_set_irq(s->bus);
2071         return;
2072     }
2073 
2074     s->status = READY_STAT | BUSY_STAT;
2075     s->error = 0;
2076     s->io_buffer_offset = 0;
2077 
2078     complete = ide_cmd_table[val].handler(s, val);
2079     if (complete) {
2080         s->status &= ~BUSY_STAT;
2081         assert(!!s->error == !!(s->status & ERR_STAT));
2082 
2083         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2084             s->status |= SEEK_STAT;
2085         }
2086 
2087         ide_cmd_done(s);
2088         ide_set_irq(s->bus);
2089     }
2090 }
2091 
2092 /* IOport [R]ead [R]egisters */
2093 enum ATA_IOPORT_RR {
2094     ATA_IOPORT_RR_DATA = 0,
2095     ATA_IOPORT_RR_ERROR = 1,
2096     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2097     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2098     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2099     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2100     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2101     ATA_IOPORT_RR_STATUS = 7,
2102     ATA_IOPORT_RR_NUM_REGISTERS,
2103 };
2104 
2105 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2106     [ATA_IOPORT_RR_DATA] = "Data",
2107     [ATA_IOPORT_RR_ERROR] = "Error",
2108     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2109     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2110     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2111     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2112     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2113     [ATA_IOPORT_RR_STATUS] = "Status"
2114 };
2115 
2116 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2117 {
2118     IDEBus *bus = opaque;
2119     IDEState *s = idebus_active_if(bus);
2120     uint32_t reg_num;
2121     int ret, hob;
2122 
2123     reg_num = addr & 7;
2124     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2125     //hob = s->select & (1 << 7);
2126     hob = 0;
2127     switch (reg_num) {
2128     case ATA_IOPORT_RR_DATA:
2129         ret = 0xff;
2130         break;
2131     case ATA_IOPORT_RR_ERROR:
2132         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2133             (s != bus->ifs && !s->blk)) {
2134             ret = 0;
2135         } else if (!hob) {
2136             ret = s->error;
2137         } else {
2138             ret = s->hob_feature;
2139         }
2140         break;
2141     case ATA_IOPORT_RR_SECTOR_COUNT:
2142         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2143             ret = 0;
2144         } else if (!hob) {
2145             ret = s->nsector & 0xff;
2146         } else {
2147             ret = s->hob_nsector;
2148         }
2149         break;
2150     case ATA_IOPORT_RR_SECTOR_NUMBER:
2151         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2152             ret = 0;
2153         } else if (!hob) {
2154             ret = s->sector;
2155         } else {
2156             ret = s->hob_sector;
2157         }
2158         break;
2159     case ATA_IOPORT_RR_CYLINDER_LOW:
2160         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2161             ret = 0;
2162         } else if (!hob) {
2163             ret = s->lcyl;
2164         } else {
2165             ret = s->hob_lcyl;
2166         }
2167         break;
2168     case ATA_IOPORT_RR_CYLINDER_HIGH:
2169         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2170             ret = 0;
2171         } else if (!hob) {
2172             ret = s->hcyl;
2173         } else {
2174             ret = s->hob_hcyl;
2175         }
2176         break;
2177     case ATA_IOPORT_RR_DEVICE_HEAD:
2178         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2179             ret = 0;
2180         } else {
2181             ret = s->select;
2182         }
2183         break;
2184     default:
2185     case ATA_IOPORT_RR_STATUS:
2186         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2187             (s != bus->ifs && !s->blk)) {
2188             ret = 0;
2189         } else {
2190             ret = s->status;
2191         }
2192         qemu_irq_lower(bus->irq);
2193         break;
2194     }
2195 
2196     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2197     return ret;
2198 }
2199 
2200 uint32_t ide_status_read(void *opaque, uint32_t addr)
2201 {
2202     IDEBus *bus = opaque;
2203     IDEState *s = idebus_active_if(bus);
2204     int ret;
2205 
2206     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2207         (s != bus->ifs && !s->blk)) {
2208         ret = 0;
2209     } else {
2210         ret = s->status;
2211     }
2212 
2213     trace_ide_status_read(addr, ret, bus, s);
2214     return ret;
2215 }
2216 
2217 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2218 {
2219     IDEBus *bus = opaque;
2220     IDEState *s;
2221     int i;
2222 
2223     trace_ide_cmd_write(addr, val, bus);
2224 
2225     /* common for both drives */
2226     if (!(bus->cmd & IDE_CMD_RESET) &&
2227         (val & IDE_CMD_RESET)) {
2228         /* reset low to high */
2229         for (i = 0; i < 2; i++) {
2230             s = &bus->ifs[i];
2231             s->status = BUSY_STAT | SEEK_STAT;
2232             s->error = 0x01;
2233         }
2234     } else if ((bus->cmd & IDE_CMD_RESET) &&
2235                !(val & IDE_CMD_RESET)) {
2236         /* high to low */
2237         for (i = 0; i < 2; i++) {
2238             s = &bus->ifs[i];
2239             if (s->drive_kind == IDE_CD)
2240                 s->status = 0x00; /* NOTE: READY is _not_ set */
2241             else
2242                 s->status = READY_STAT | SEEK_STAT;
2243             ide_set_signature(s);
2244         }
2245     }
2246 
2247     bus->cmd = val;
2248 }
2249 
2250 /*
2251  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2252  * transferred from the device to the guest), false if it's a PIO in
2253  */
2254 static bool ide_is_pio_out(IDEState *s)
2255 {
2256     if (s->end_transfer_func == ide_sector_write ||
2257         s->end_transfer_func == ide_atapi_cmd) {
2258         return false;
2259     } else if (s->end_transfer_func == ide_sector_read ||
2260                s->end_transfer_func == ide_transfer_stop ||
2261                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2262                s->end_transfer_func == ide_dummy_transfer_stop) {
2263         return true;
2264     }
2265 
2266     abort();
2267 }
2268 
2269 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2270 {
2271     IDEBus *bus = opaque;
2272     IDEState *s = idebus_active_if(bus);
2273     uint8_t *p;
2274 
2275     trace_ide_data_writew(addr, val, bus, s);
2276 
2277     /* PIO data access allowed only when DRQ bit is set. The result of a write
2278      * during PIO out is indeterminate, just ignore it. */
2279     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2280         return;
2281     }
2282 
2283     p = s->data_ptr;
2284     if (p + 2 > s->data_end) {
2285         return;
2286     }
2287 
2288     *(uint16_t *)p = le16_to_cpu(val);
2289     p += 2;
2290     s->data_ptr = p;
2291     if (p >= s->data_end) {
2292         s->status &= ~DRQ_STAT;
2293         s->end_transfer_func(s);
2294     }
2295 }
2296 
2297 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2298 {
2299     IDEBus *bus = opaque;
2300     IDEState *s = idebus_active_if(bus);
2301     uint8_t *p;
2302     int ret;
2303 
2304     /* PIO data access allowed only when DRQ bit is set. The result of a read
2305      * during PIO in is indeterminate, return 0 and don't move forward. */
2306     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2307         return 0;
2308     }
2309 
2310     p = s->data_ptr;
2311     if (p + 2 > s->data_end) {
2312         return 0;
2313     }
2314 
2315     ret = cpu_to_le16(*(uint16_t *)p);
2316     p += 2;
2317     s->data_ptr = p;
2318     if (p >= s->data_end) {
2319         s->status &= ~DRQ_STAT;
2320         s->end_transfer_func(s);
2321     }
2322 
2323     trace_ide_data_readw(addr, ret, bus, s);
2324     return ret;
2325 }
2326 
2327 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2328 {
2329     IDEBus *bus = opaque;
2330     IDEState *s = idebus_active_if(bus);
2331     uint8_t *p;
2332 
2333     trace_ide_data_writel(addr, val, bus, s);
2334 
2335     /* PIO data access allowed only when DRQ bit is set. The result of a write
2336      * during PIO out is indeterminate, just ignore it. */
2337     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2338         return;
2339     }
2340 
2341     p = s->data_ptr;
2342     if (p + 4 > s->data_end) {
2343         return;
2344     }
2345 
2346     *(uint32_t *)p = le32_to_cpu(val);
2347     p += 4;
2348     s->data_ptr = p;
2349     if (p >= s->data_end) {
2350         s->status &= ~DRQ_STAT;
2351         s->end_transfer_func(s);
2352     }
2353 }
2354 
2355 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2356 {
2357     IDEBus *bus = opaque;
2358     IDEState *s = idebus_active_if(bus);
2359     uint8_t *p;
2360     int ret;
2361 
2362     /* PIO data access allowed only when DRQ bit is set. The result of a read
2363      * during PIO in is indeterminate, return 0 and don't move forward. */
2364     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2365         ret = 0;
2366         goto out;
2367     }
2368 
2369     p = s->data_ptr;
2370     if (p + 4 > s->data_end) {
2371         return 0;
2372     }
2373 
2374     ret = cpu_to_le32(*(uint32_t *)p);
2375     p += 4;
2376     s->data_ptr = p;
2377     if (p >= s->data_end) {
2378         s->status &= ~DRQ_STAT;
2379         s->end_transfer_func(s);
2380     }
2381 
2382 out:
2383     trace_ide_data_readl(addr, ret, bus, s);
2384     return ret;
2385 }
2386 
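     /* Placeholder end_transfer_func: rewind the PIO window to an empty
      * io_buffer and leave an all-ones pattern in its first four bytes. */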
2387 static void ide_dummy_transfer_stop(IDEState *s)
2388 {
2389     s->data_ptr = s->io_buffer;
2390     s->data_end = s->io_buffer;
2391     s->io_buffer[0] = 0xff;
2392     s->io_buffer[1] = 0xff;
2393     s->io_buffer[2] = 0xff;
2394     s->io_buffer[3] = 0xff;
2395 }
2396 
2397 void ide_bus_reset(IDEBus *bus)
2398 {
2399     bus->unit = 0;
2400     bus->cmd = 0;
2401     ide_reset(&bus->ifs[0]);
2402     ide_reset(&bus->ifs[1]);
2403     ide_clear_hob(bus);
2404 
2405     /* pending async DMA */
2406     if (bus->dma->aiocb) {
2407         trace_ide_bus_reset_aio();
2408         blk_aio_cancel(bus->dma->aiocb);
2409         bus->dma->aiocb = NULL;
2410     }
2411 
2412     /* reset dma provider too */
2413     if (bus->dma->ops->reset) {
2414         bus->dma->ops->reset(bus->dma);
2415     }
2416 }
2417 
2418 static bool ide_cd_is_tray_open(void *opaque)
2419 {
2420     return ((IDEState *)opaque)->tray_open;
2421 }
2422 
2423 static bool ide_cd_is_medium_locked(void *opaque)
2424 {
2425     return ((IDEState *)opaque)->tray_locked;
2426 }
2427 
2428 static void ide_resize_cb(void *opaque)
2429 {
2430     IDEState *s = opaque;
2431     uint64_t nb_sectors;
2432 
2433     if (!s->identify_set) {
2434         return;
2435     }
2436 
2437     blk_get_geometry(s->blk, &nb_sectors);
2438     s->nb_sectors = nb_sectors;
2439 
2440     /* Update the identify data buffer. */
2441     if (s->drive_kind == IDE_CFATA) {
2442         ide_cfata_identify_size(s);
2443     } else {
2444         /* IDE_CD uses a different set of callbacks entirely. */
2445         assert(s->drive_kind != IDE_CD);
2446         ide_identify_size(s);
2447     }
2448 }
2449 
2450 static const BlockDevOps ide_cd_block_ops = {
2451     .change_media_cb = ide_cd_change_cb,
2452     .eject_request_cb = ide_cd_eject_request_cb,
2453     .is_tray_open = ide_cd_is_tray_open,
2454     .is_medium_locked = ide_cd_is_medium_locked,
2455 };
2456 
2457 static const BlockDevOps ide_hd_block_ops = {
2458     .resize_cb = ide_resize_cb,
2459 };
2460 
2461 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2462                    const char *version, const char *serial, const char *model,
2463                    uint64_t wwn,
2464                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2465                    int chs_trans, Error **errp)
2466 {
2467     uint64_t nb_sectors;
2468 
2469     s->blk = blk;
2470     s->drive_kind = kind;
2471 
2472     blk_get_geometry(blk, &nb_sectors);
2473     s->cylinders = cylinders;
2474     s->heads = heads;
2475     s->sectors = secs;
2476     s->chs_trans = chs_trans;
2477     s->nb_sectors = nb_sectors;
2478     s->wwn = wwn;
2479     /* The SMART values should be preserved across power cycles
2480        but they aren't.  */
2481     s->smart_enabled = 1;
2482     s->smart_autosave = 1;
2483     s->smart_errors = 0;
2484     s->smart_selftest_count = 0;
2485     if (kind == IDE_CD) {
2486         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2487         blk_set_guest_block_size(blk, 2048);
2488     } else {
2489         if (!blk_is_inserted(s->blk)) {
2490             error_setg(errp, "Device needs media, but drive is empty");
2491             return -1;
2492         }
2493         if (blk_is_read_only(blk)) {
2494             error_setg(errp, "Can't use a read-only drive");
2495             return -1;
2496         }
2497         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2498     }
2499     if (serial) {
2500         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2501     } else {
2502         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2503                  "QM%05d", s->drive_serial);
2504     }
2505     if (model) {
2506         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2507     } else {
2508         switch (kind) {
2509         case IDE_CD:
2510             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2511             break;
2512         case IDE_CFATA:
2513             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2514             break;
2515         default:
2516             strcpy(s->drive_model_str, "QEMU HARDDISK");
2517             break;
2518         }
2519     }
2520 
2521     if (version) {
2522         pstrcpy(s->version, sizeof(s->version), version);
2523     } else {
2524         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2525     }
2526 
2527     ide_reset(s);
2528     blk_iostatus_enable(blk);
2529     return 0;
2530 }
2531 
2532 static void ide_init1(IDEBus *bus, int unit)
2533 {
2534     static int drive_serial = 1;
2535     IDEState *s = &bus->ifs[unit];
2536 
2537     s->bus = bus;
2538     s->unit = unit;
2539     s->drive_serial = drive_serial++;
2540     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2541     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2542     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2543     memset(s->io_buffer, 0, s->io_buffer_total_len);
2544 
2545     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2546     memset(s->smart_selftest_data, 0, 512);
2547 
2548     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2549                                            ide_sector_write_timer_cb, s);
2550 }
2551 
2552 static int ide_nop_int(IDEDMA *dma, int x)
2553 {
2554     return 0;
2555 }
2556 
2557 static void ide_nop(IDEDMA *dma)
2558 {
2559 }
2560 
2561 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2562 {
2563     return 0;
2564 }
2565 
2566 static const IDEDMAOps ide_dma_nop_ops = {
2567     .prepare_buf    = ide_nop_int32,
2568     .restart_dma    = ide_nop,
2569     .rw_buf         = ide_nop_int,
2570 };
2571 
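     /* Rebuild the transfer from the retry fields saved on the bus and start
      * the DMA again (used by the restart path in ide_restart_bh below). */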
2572 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2573 {
2574     s->unit = s->bus->retry_unit;
2575     ide_set_sector(s, s->bus->retry_sector_num);
2576     s->nsector = s->bus->retry_nsector;
2577     s->bus->dma->ops->restart_dma(s->bus->dma);
2578     s->io_buffer_size = 0;
2579     s->dma_cmd = dma_cmd;
2580     ide_start_dma(s, ide_dma_cb);
2581 }
2582 
2583 static void ide_restart_bh(void *opaque)
2584 {
2585     IDEBus *bus = opaque;
2586     IDEState *s;
2587     bool is_read;
2588     int error_status;
2589 
2590     qemu_bh_delete(bus->bh);
2591     bus->bh = NULL;
2592 
2593     error_status = bus->error_status;
2594     if (bus->error_status == 0) {
2595         return;
2596     }
2597 
2598     s = idebus_active_if(bus);
2599     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2600 
2601     /* The error status must be cleared before resubmitting the request: The
2602      * request may fail again, and this case can only be distinguished if the
2603      * called function can set a new error status. */
2604     bus->error_status = 0;
2605 
2606     /* The HBA has generically asked to be kicked on retry */
2607     if (error_status & IDE_RETRY_HBA) {
2608         if (s->bus->dma->ops->restart) {
2609             s->bus->dma->ops->restart(s->bus->dma);
2610         }
2611     } else if (IS_IDE_RETRY_DMA(error_status)) {
2612         if (error_status & IDE_RETRY_TRIM) {
2613             ide_restart_dma(s, IDE_DMA_TRIM);
2614         } else {
2615             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2616         }
2617     } else if (IS_IDE_RETRY_PIO(error_status)) {
2618         if (is_read) {
2619             ide_sector_read(s);
2620         } else {
2621             ide_sector_write(s);
2622         }
2623     } else if (error_status & IDE_RETRY_FLUSH) {
2624         ide_flush_cache(s);
2625     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2626         assert(s->end_transfer_func == ide_atapi_cmd);
2627         ide_atapi_dma_restart(s);
2628     } else {
2629         abort();
2630     }
2631 }
2632 
2633 static void ide_restart_cb(void *opaque, int running, RunState state)
2634 {
2635     IDEBus *bus = opaque;
2636 
2637     if (!running)
2638         return;
2639 
2640     if (!bus->bh) {
2641         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2642         qemu_bh_schedule(bus->bh);
2643     }
2644 }
2645 
2646 void ide_register_restart_cb(IDEBus *bus)
2647 {
2648     if (bus->dma->ops->restart_dma) {
2649         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2650     }
2651 }
2652 
2653 static IDEDMA ide_dma_nop = {
2654     .ops = &ide_dma_nop_ops,
2655     .aiocb = NULL,
2656 };
2657 
2658 void ide_init2(IDEBus *bus, qemu_irq irq)
2659 {
2660     int i;
2661 
2662     for (i = 0; i < 2; i++) {
2663         ide_init1(bus, i);
2664         ide_reset(&bus->ifs[i]);
2665     }
2666     bus->irq = irq;
2667     bus->dma = &ide_dma_nop;
2668 }
2669 
2670 void ide_exit(IDEState *s)
2671 {
2672     timer_del(s->sector_write_timer);
2673     timer_free(s->sector_write_timer);
2674     qemu_vfree(s->smart_selftest_data);
2675     qemu_vfree(s->io_buffer);
2676 }
2677 
2678 static const MemoryRegionPortio ide_portio_list[] = {
2679     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2680     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2681     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2682     PORTIO_END_OF_LIST(),
2683 };
2684 
2685 static const MemoryRegionPortio ide_portio2_list[] = {
2686     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2687     PORTIO_END_OF_LIST(),
2688 };
2689 
2690 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2691 {
2692     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2693        bridge has been set up properly to always register with ISA.  */
2694     isa_register_portio_list(dev, &bus->portio_list,
2695                              iobase, ide_portio_list, bus, "ide");
2696 
2697     if (iobase2) {
2698         isa_register_portio_list(dev, &bus->portio2_list,
2699                                  iobase2, ide_portio2_list, bus, "ide");
2700     }
2701 }
2702 
2703 static bool is_identify_set(void *opaque, int version_id)
2704 {
2705     IDEState *s = opaque;
2706 
2707     return s->identify_set != 0;
2708 }
2709 
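     /* end_transfer_func cannot be migrated as a raw pointer, so the PIO
      * vmstate stores an index into this table instead (see
      * ide_drive_pio_pre_save and ide_drive_pio_post_load). */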
2710 static EndTransferFunc* transfer_end_table[] = {
2711         ide_sector_read,
2712         ide_sector_write,
2713         ide_transfer_stop,
2714         ide_atapi_cmd_reply_end,
2715         ide_atapi_cmd,
2716         ide_dummy_transfer_stop,
2717 };
2718 
2719 static int transfer_end_table_idx(EndTransferFunc *fn)
2720 {
2721     int i;
2722 
2723     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2724         if (transfer_end_table[i] == fn)
2725             return i;
2726 
2727     return -1;
2728 }
2729 
2730 static int ide_drive_post_load(void *opaque, int version_id)
2731 {
2732     IDEState *s = opaque;
2733 
2734     if (s->blk && s->identify_set) {
2735         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2736     }
2737     return 0;
2738 }
2739 
2740 static int ide_drive_pio_post_load(void *opaque, int version_id)
2741 {
2742     IDEState *s = opaque;
2743 
2744     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2745         return -EINVAL;
2746     }
2747     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2748     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2749     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2750     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2751 
2752     return 0;
2753 }
2754 
2755 static int ide_drive_pio_pre_save(void *opaque)
2756 {
2757     IDEState *s = opaque;
2758     int idx;
2759 
2760     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2761     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2762 
2763     idx = transfer_end_table_idx(s->end_transfer_func);
2764     if (idx == -1) {
2765         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2766                         __func__);
2767         s->end_transfer_fn_idx = 2;
2768     } else {
2769         s->end_transfer_fn_idx = idx;
2770     }
2771 
2772     return 0;
2773 }
2774 
2775 static bool ide_drive_pio_state_needed(void *opaque)
2776 {
2777     IDEState *s = opaque;
2778 
2779     return ((s->status & DRQ_STAT) != 0)
2780         || (s->bus->error_status & IDE_RETRY_PIO);
2781 }
2782 
2783 static bool ide_tray_state_needed(void *opaque)
2784 {
2785     IDEState *s = opaque;
2786 
2787     return s->tray_open || s->tray_locked;
2788 }
2789 
2790 static bool ide_atapi_gesn_needed(void *opaque)
2791 {
2792     IDEState *s = opaque;
2793 
2794     return s->events.new_media || s->events.eject_request;
2795 }
2796 
2797 static bool ide_error_needed(void *opaque)
2798 {
2799     IDEBus *bus = opaque;
2800 
2801     return (bus->error_status != 0);
2802 }
2803 
2804 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2805 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2806     .name = "ide_drive/atapi/gesn_state",
2807     .version_id = 1,
2808     .minimum_version_id = 1,
2809     .needed = ide_atapi_gesn_needed,
2810     .fields = (VMStateField[]) {
2811         VMSTATE_BOOL(events.new_media, IDEState),
2812         VMSTATE_BOOL(events.eject_request, IDEState),
2813         VMSTATE_END_OF_LIST()
2814     }
2815 };
2816 
2817 static const VMStateDescription vmstate_ide_tray_state = {
2818     .name = "ide_drive/tray_state",
2819     .version_id = 1,
2820     .minimum_version_id = 1,
2821     .needed = ide_tray_state_needed,
2822     .fields = (VMStateField[]) {
2823         VMSTATE_BOOL(tray_open, IDEState),
2824         VMSTATE_BOOL(tray_locked, IDEState),
2825         VMSTATE_END_OF_LIST()
2826     }
2827 };
2828 
2829 static const VMStateDescription vmstate_ide_drive_pio_state = {
2830     .name = "ide_drive/pio_state",
2831     .version_id = 1,
2832     .minimum_version_id = 1,
2833     .pre_save = ide_drive_pio_pre_save,
2834     .post_load = ide_drive_pio_post_load,
2835     .needed = ide_drive_pio_state_needed,
2836     .fields = (VMStateField[]) {
2837         VMSTATE_INT32(req_nb_sectors, IDEState),
2838         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2839                              vmstate_info_uint8, uint8_t),
2840         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2841         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2842         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2843         VMSTATE_INT32(elementary_transfer_size, IDEState),
2844         VMSTATE_INT32(packet_transfer_size, IDEState),
2845         VMSTATE_END_OF_LIST()
2846     }
2847 };
2848 
2849 const VMStateDescription vmstate_ide_drive = {
2850     .name = "ide_drive",
2851     .version_id = 3,
2852     .minimum_version_id = 0,
2853     .post_load = ide_drive_post_load,
2854     .fields = (VMStateField[]) {
2855         VMSTATE_INT32(mult_sectors, IDEState),
2856         VMSTATE_INT32(identify_set, IDEState),
2857         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2858         VMSTATE_UINT8(feature, IDEState),
2859         VMSTATE_UINT8(error, IDEState),
2860         VMSTATE_UINT32(nsector, IDEState),
2861         VMSTATE_UINT8(sector, IDEState),
2862         VMSTATE_UINT8(lcyl, IDEState),
2863         VMSTATE_UINT8(hcyl, IDEState),
2864         VMSTATE_UINT8(hob_feature, IDEState),
2865         VMSTATE_UINT8(hob_sector, IDEState),
2866         VMSTATE_UINT8(hob_nsector, IDEState),
2867         VMSTATE_UINT8(hob_lcyl, IDEState),
2868         VMSTATE_UINT8(hob_hcyl, IDEState),
2869         VMSTATE_UINT8(select, IDEState),
2870         VMSTATE_UINT8(status, IDEState),
2871         VMSTATE_UINT8(lba48, IDEState),
2872         VMSTATE_UINT8(sense_key, IDEState),
2873         VMSTATE_UINT8(asc, IDEState),
2874         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2875         VMSTATE_END_OF_LIST()
2876     },
2877     .subsections = (const VMStateDescription*[]) {
2878         &vmstate_ide_drive_pio_state,
2879         &vmstate_ide_tray_state,
2880         &vmstate_ide_atapi_gesn_state,
2881         NULL
2882     }
2883 };
2884 
2885 static const VMStateDescription vmstate_ide_error_status = {
2886     .name = "ide_bus/error",
2887     .version_id = 2,
2888     .minimum_version_id = 1,
2889     .needed = ide_error_needed,
2890     .fields = (VMStateField[]) {
2891         VMSTATE_INT32(error_status, IDEBus),
2892         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2893         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2894         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2895         VMSTATE_END_OF_LIST()
2896     }
2897 };
2898 
2899 const VMStateDescription vmstate_ide_bus = {
2900     .name = "ide_bus",
2901     .version_id = 1,
2902     .minimum_version_id = 1,
2903     .fields = (VMStateField[]) {
2904         VMSTATE_UINT8(cmd, IDEBus),
2905         VMSTATE_UINT8(unit, IDEBus),
2906         VMSTATE_END_OF_LIST()
2907     },
2908     .subsections = (const VMStateDescription*[]) {
2909         &vmstate_ide_error_status,
2910         NULL
2911     }
2912 };
2913 
2914 void ide_drive_get(DriveInfo **hd, int n)
2915 {
2916     int i;
2917 
2918     for (i = 0; i < n; i++) {
2919         hd[i] = drive_get_by_index(IF_IDE, i);
2920     }
2921 }
2922