xref: /openbmc/qemu/hw/ide/core.c (revision e4fdf9df)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "qemu/hw-version.h"
33 #include "qemu/memalign.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/blockdev.h"
36 #include "sysemu/dma.h"
37 #include "hw/block/block.h"
38 #include "sysemu/block-backend.h"
39 #include "qapi/error.h"
40 #include "qemu/cutils.h"
41 #include "sysemu/replay.h"
42 #include "sysemu/runstate.h"
43 #include "hw/ide/internal.h"
44 #include "trace.h"
45 
46 /* These values were based on a Seagate ST3500418AS but have been modified
47    to make more sense in QEMU */
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
/* Each row is one 12-byte SMART attribute entry; the layout of the first
 * eleven bytes matches an on-disk attribute record, the last byte is the
 * failure threshold reported by SMART READ THRESH. */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
65 
/* Human-readable names for enum ide_dma_cmd, indexed by the enum value;
 * used for tracing via IDE_DMA_CMD_str(). */
const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
    [IDE_DMA_READ] = "DMA READ",
    [IDE_DMA_WRITE] = "DMA WRITE",
    [IDE_DMA_TRIM] = "DMA TRIM",
    [IDE_DMA_ATAPI] = "DMA ATAPI"
};
72 
73 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
74 {
75     if ((unsigned)enval < IDE_DMA__COUNT) {
76         return IDE_DMA_CMD_lookup[enval];
77     }
78     return "DMA UNKNOWN CMD";
79 }
80 
81 static void ide_dummy_transfer_stop(IDEState *s);
82 
/*
 * Copy @src into @str as an ATA identify string of exactly @len bytes:
 * space-padded (not NUL-terminated) and byte-swapped within each 16-bit
 * word, as the ATA string format requires.
 */
static void padstr(char *str, const char *src, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        char c = *src ? *src++ : ' ';

        /* The i^1 index swaps each pair of bytes. */
        str[i ^ 1] = c;
    }
}
94 
/* Store the low 16 bits of @v at @p in little-endian byte order. */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
99 
100 static void ide_identify_size(IDEState *s)
101 {
102     uint16_t *p = (uint16_t *)s->identify_data;
103     int64_t nb_sectors_lba28 = s->nb_sectors;
104     if (nb_sectors_lba28 >= 1 << 28) {
105         nb_sectors_lba28 = (1 << 28) - 1;
106     }
107     put_le16(p + 60, nb_sectors_lba28);
108     put_le16(p + 61, nb_sectors_lba28 >> 16);
109     put_le16(p + 100, s->nb_sectors);
110     put_le16(p + 101, s->nb_sectors >> 16);
111     put_le16(p + 102, s->nb_sectors >> 32);
112     put_le16(p + 103, s->nb_sectors >> 48);
113 }
114 
/*
 * Build the ATA IDENTIFY DEVICE response for a hard disk.
 *
 * The 512-byte identify_data block is generated once and cached
 * (guarded by s->identify_set); subsequent calls only copy the cached
 * data into s->io_buffer.  Word offsets follow the ATA specification's
 * IDENTIFY DEVICE layout.
 */
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    /* current capacity in sectors, from the current CHS translation */
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }
    if (dev) {
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
229 
/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD-ROM.
 *
 * As with ide_identify(), the data is generated once and cached in
 * s->identify_data; later calls only copy it into s->io_buffer.
 */
static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7);  /* single word dma0-2 supported */
    put_le16(p + 63, 7);  /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
297 
298 static void ide_cfata_identify_size(IDEState *s)
299 {
300     uint16_t *p = (uint16_t *)s->identify_data;
301     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
302     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
303     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
304     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
305 }
306 
/*
 * Build the IDENTIFY response for a CompactFlash (CFATA) card.
 *
 * Generated once and cached in s->identify_data (guarded by
 * s->identify_set); later calls only copy it into s->io_buffer.
 */
static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* current capacity from the default CHS geometry */
    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);		/* Default cylinders */
    put_le16(p + 3, s->heads);			/* Default heads */
    put_le16(p + 6, s->sectors);		/* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);			/* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);			/* Capabilities */
    put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);			/* Translation params valid */
    put_le16(p + 54, s->cylinders);		/* Current cylinders */
    put_le16(p + 55, s->heads);			/* Current heads */
    put_le16(p + 56, s->sectors);		/* Current sectors */
    put_le16(p + 57, cur_sec);			/* Current capacity */
    put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
    if (s->mult_sectors)			/* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
    put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
    put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);			/* Command Set supported */
    put_le16(p + 83, 0x7068);			/* Command Set supported */
    put_le16(p + 84, 0x4000);			/* Features supported */
    put_le16(p + 85, 0x000c);			/* Command Set enabled */
    put_le16(p + 86, 0x7044);			/* Command Set enabled */
    put_le16(p + 87, 0x4000);			/* Features enabled */
    put_le16(p + 91, 0x4060);			/* Current APM level */
    put_le16(p + 129, 0x0002);			/* Current features option */
    put_le16(p + 130, 0x0005);			/* Reassigned sectors */
    put_le16(p + 131, 0x0001);			/* Initial power mode */
    put_le16(p + 132, 0x0000);			/* User signature */
    put_le16(p + 160, 0x8100);			/* Power requirement */
    put_le16(p + 161, 0x8001);			/* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
373 
374 static void ide_set_signature(IDEState *s)
375 {
376     s->select &= ~(ATA_DEV_HS); /* clear head */
377     /* put signature */
378     s->nsector = 1;
379     s->sector = 1;
380     if (s->drive_kind == IDE_CD) {
381         s->lcyl = 0x14;
382         s->hcyl = 0xeb;
383     } else if (s->blk) {
384         s->lcyl = 0;
385         s->hcyl = 0;
386     } else {
387         s->lcyl = 0xff;
388         s->hcyl = 0xff;
389     }
390 }
391 
392 static bool ide_sect_range_ok(IDEState *s,
393                               uint64_t sector, uint64_t nb_sectors)
394 {
395     uint64_t total_sectors;
396 
397     blk_get_geometry(s->blk, &total_sectors);
398     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
399         return false;
400     }
401     return true;
402 }
403 
/* State for an in-flight DSM TRIM operation (see ide_issue_trim()). */
typedef struct TrimAIOCB {
    BlockAIOCB common;
    IDEState *s;
    QEMUBH *bh;          /* deferred completion, run by ide_trim_bh_cb() */
    int ret;             /* final status: 0, negative errno or -ECANCELED */
    QEMUIOVector *qiov;  /* guest TRIM payload (8-byte range entries) */
    BlockAIOCB *aiocb;   /* currently outstanding discard, if any */
    int i, j;            /* resume cursor: entry i within iovec j */
} TrimAIOCB;
413 
/*
 * Async-cancel hook for a TRIM AIOCB: stop iterating over the payload,
 * record -ECANCELED and cancel any discard currently in flight.
 */
static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue  */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
429 
/* AIOCB class for TRIM requests; supports asynchronous cancellation. */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};
434 
/*
 * Bottom half that completes a TRIM operation: invoke the caller's
 * completion callback with the final status, then release the bh and
 * the AIOCB.
 */
static void ide_trim_bh_cb(void *opaque)
{
    TrimAIOCB *iocb = opaque;
    /* Keep a local copy: iocb is freed before the in-flight decrement. */
    BlockBackend *blk = iocb->s->blk;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);

    /* Paired with an increment in ide_issue_trim() */
    blk_dec_in_flight(blk);
}
449 
/*
 * Completion callback driving the TRIM state machine.
 *
 * The guest TRIM payload (iocb->qiov) is a list of 8-byte entries, each
 * packing a 6-byte starting LBA and a 2-byte sector count.  Each call
 * accounts for the previously submitted discard (if any), then scans
 * forward from the (j, i) cursor and submits the next non-empty range as
 * one blk_aio_pdiscard(), re-entering itself on completion.  When the
 * payload is exhausted, or on error/cancellation, the bottom half is
 * scheduled to finish the request.
 */
static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    IDEState *s = iocb->s;

    /* i >= 0 means a discard was submitted on the previous iteration. */
    if (iocb->i >= 0) {
        if (ret >= 0) {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        }
    }

    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                /* Zero-length entries are simply skipped. */
                if (count == 0) {
                    continue;
                }

                if (!ide_sect_range_ok(s, sector, count)) {
                    block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
                    iocb->ret = -EINVAL;
                    goto done;
                }

                block_acct_start(blk_get_stats(s->blk), &s->acct,
                                 count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);

                /* Got an entry! Submit and exit.  */
                iocb->aiocb = blk_aio_pdiscard(s->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

done:
    iocb->aiocb = NULL;
    if (iocb->bh) {
        replay_bh_schedule_event(iocb->bh);
    }
}
509 
/*
 * Start a DSM TRIM operation.
 *
 * @qiov holds the guest's TRIM payload; the ranges to discard come from
 * the payload entries themselves, so @offset is not used here.  @opaque
 * is the IDEState.  Returns the AIOCB covering the whole operation;
 * @cb/@cb_opaque are invoked once all ranges have been processed.
 */
BlockAIOCB *ide_issue_trim(
        int64_t offset, QEMUIOVector *qiov,
        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
    IDEState *s = opaque;
    TrimAIOCB *iocb;

    /* Paired with a decrement in ide_trim_bh_cb() */
    blk_inc_in_flight(s->blk);

    iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
    iocb->s = s;
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    iocb->ret = 0;
    iocb->qiov = qiov;
    /* Cursor starts before the first entry of the first iovec. */
    iocb->i = -1;
    iocb->j = 0;
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
}
530 
/*
 * Abort the current command: stop any data transfer and report
 * READY|ERR status with the ABRT error code.
 */
void ide_abort_command(IDEState *s)
{
    ide_transfer_stop(s);
    s->status = READY_STAT | ERR_STAT;
    s->error = ABRT_ERR;
}
537 
/* Record the current unit/sector/count on the bus so a failed request
 * can be retried later (see ide_handle_rw_error / error_status). */
static void ide_set_retry(IDEState *s)
{
    s->bus->retry_unit = s->unit;
    s->bus->retry_sector_num = ide_get_sector(s);
    s->bus->retry_nsector = s->nsector;
}
544 
/* Reset the bus retry state; retry_unit -1 means "no retry pending". */
static void ide_clear_retry(IDEState *s)
{
    s->bus->retry_unit = -1;
    s->bus->retry_sector_num = 0;
    s->bus->retry_nsector = 0;
}
551 
/* prepare data transfer and tell what to do after */
/*
 * Set up a PIO data transfer window [buf, buf+size).
 *
 * Returns false when the guest will drive the transfer through the data
 * port (end_transfer_func is saved for later); returns true when the
 * bus's pio_transfer hook consumed the data immediately, in which case
 * the caller is responsible for invoking @end_transfer_func itself
 * (see ide_transfer_start()).
 */
bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
                                  EndTransferFunc *end_transfer_func)
{
    s->data_ptr = buf;
    s->data_end = buf + size;
    ide_set_retry(s);
    /* Do not assert DRQ on a command that already failed. */
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (!s->bus->dma->ops->pio_transfer) {
        s->end_transfer_func = end_transfer_func;
        return false;
    }
    s->bus->dma->ops->pio_transfer(s->bus->dma);
    return true;
}
569 
/*
 * Start a PIO transfer and, if the pio_transfer hook completed it
 * synchronously, run the end-transfer function right away.
 */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
{
    if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
        end_transfer_func(s);
    }
}
577 
/* Notify the DMA backend (if it cares) that the current command ended. */
static void ide_cmd_done(IDEState *s)
{
    if (s->bus->dma->ops->cmd_done) {
        s->bus->dma->ops->cmd_done(s->bus->dma);
    }
}
584 
/*
 * Reset the PIO transfer state: empty data window, DRQ cleared, and the
 * end-transfer hook pointed back at ide_transfer_stop.
 */
static void ide_transfer_halt(IDEState *s)
{
    s->end_transfer_func = ide_transfer_stop;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
}
592 
/* Halt the PIO transfer and signal command completion to the backend. */
void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s);
    ide_cmd_done(s);
}
598 
/*
 * Decode the command-block registers into an absolute sector number,
 * using LBA48, LBA28 or CHS addressing depending on the select register
 * and the lba48 flag.
 */
int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            /* LBA48: six bytes spread over the hob/non-hob registers */
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        } else {
            /* LBA28 */
            sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
                (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
        }
    } else {
        /* CHS */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
    }

    return sector_num;
}
622 
/*
 * Inverse of ide_get_sector(): encode @sector_num back into the
 * command-block registers in the currently selected addressing mode.
 */
void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        } else {
            /* LBA28: bits 27-24 live in the select register */
            s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
                ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        }
    } else {
        /* CHS: split into cylinder, head and 1-based sector */
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & ~(ATA_DEV_HS)) |
            ((r / s->sectors) & (ATA_DEV_HS));
        s->sector = (r % s->sectors) + 1;
    }
}
653 
654 static void ide_rw_error(IDEState *s) {
655     ide_abort_command(s);
656     ide_set_irq(s->bus);
657 }
658 
/*
 * Completion of a buffered read (see ide_buffered_readv): copy the
 * bounce buffer back into the caller's iovec and run the original
 * callback — unless the request was orphaned by ide_cancel_dma_sync(),
 * in which case the caller was already notified and only the bounce
 * buffer is freed.
 */
static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            assert(req->qiov.size == req->original_qiov->size);
            qemu_iovec_from_buf(req->original_qiov, 0,
                                req->qiov.local_iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(qemu_iovec_buf(&req->qiov));
    g_free(req);
}
675 
676 #define MAX_BUFFERED_REQS 16
677 
/**
 * Submit a read through a private bounce buffer so the request can be
 * safely canceled (orphaned) without the block layer writing into guest
 * memory afterwards — see ide_cancel_dma_sync().
 *
 * @sector_num: starting sector
 * @iov: caller's destination iovec; filled in by ide_buffered_readv_cb
 * @nb_sectors: sector count (the byte size is taken from @iov)
 * Returns the AIOCB, or an immediate -EIO abort if too many buffered
 * requests are already pending.
 */
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *aioreq;
    IDEBufferedRequest *req;
    int c = 0;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        c++;
    }
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    }

    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    /* Bounce buffer of the same size as the caller's iovec. */
    qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
                        iov->size);

    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);

    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
    return aioreq;
}
706 
/**
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        /* ide_buffered_readv_cb() will skip the callback and just free. */
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * like if the DMA was completed by the time the guest trying
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}
746 
747 static void ide_sector_read(IDEState *s);
748 
/*
 * Completion of the buffered read issued by ide_sector_read().
 *
 * On error, ide_handle_rw_error() decides whether to retry or report;
 * on success the sector registers are advanced and the guest is handed
 * the io_buffer for PIO, chaining back into ide_sector_read() for the
 * next DRQ block.
 */
static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* At most req_nb_sectors are transferred per DRQ block. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_irq(s->bus);
}
777 
/*
 * Start (or continue) a PIO sector read.  Reads up to req_nb_sectors at
 * a time into io_buffer via a buffered request; completion continues in
 * ide_sector_read_cb().  A zero remaining count ends the command.
 */
static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    /* Reject reads past the end of the device. */
    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}
814 
/*
 * Commit @tx_bytes of completed DMA to the backend's buffer bookkeeping,
 * advance the io_buffer offset and tear down the scatter/gather list.
 * @tx_bytes may be 0 to abandon the current sglist without progress.
 */
void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
}
823 
/*
 * Mark the DMA engine idle: clear the in-flight AIOCB and retry state,
 * notify the backend (@more indicates whether descriptors remain
 * unconsumed) and signal command completion.
 */
void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    ide_clear_retry(s);
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}
833 
/*
 * Fail the current DMA command: discard the sglist without committing
 * any bytes, abort with ABRT, deactivate the DMA engine and raise the
 * IRQ so the guest sees the error.
 */
void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}
841 
/*
 * Apply the configured block error policy to a failed request.
 *
 * @error is a positive errno; @op is a mask of IDE_RETRY_* flags that
 * records how to restart the request.  For STOP the op is stashed in
 * bus->error_status so it can be retried on VM resume; for REPORT the
 * failure is surfaced to the guest in the appropriate way (DMA, ATAPI
 * or plain PIO).  Returns nonzero unless the policy is IGNORE, i.e.
 * nonzero means the caller must not continue the request normally.
 */
int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
863 
/*
 * Completion callback for one leg of an ATA DMA transfer (READ, WRITE
 * or TRIM).  Commits the bytes the previous leg moved, advances the
 * drive's sector position, and either completes the command or maps
 * the next PRD table and re-issues itself as the callback for the
 * next leg.
 *
 * @opaque: the IDEState the transfer belongs to
 * @ret:    0 on success, negative errno on failure; -EINVAL is fatal
 *          and bypasses the rerror/werror retry policy
 */
static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;
    int32_t prep_size = 0;

    /* Invalid requests fail immediately instead of going through the
     * configurable error policy. */
    if (ret == -EINVAL) {
        ide_dma_error(s);
        return;
    }

    if (ret < 0) {
        /* Non-zero return: the error was reported or the VM stopped;
         * drop the mapping and bail out. */
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    if (s->io_buffer_size > s->nsector * 512) {
        /*
         * The PRDs were longer than needed for this request.
         * The Active bit must remain set after the request completes.
         */
        n = s->nsector;
        stay_active = true;
    } else {
        n = s->io_buffer_size >> 9;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        /* Commit what the completed leg transferred and advance. */
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
    /* prepare_buf() must succeed and respect the limit */
    assert(prep_size >= 0 && prep_size <= n * 512);

    /*
     * Now prep_size stores the number of bytes in the sglist, and
     * s->io_buffer_size stores the number of bytes described by the PRDs.
     */

    if (prep_size < n * 512) {
        /*
         * The PRDs are too short for this request. Error condition!
         * Reset the Active bit and don't raise the interrupt.
         */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));

    /* Reject transfers that run past the end of the device. */
    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}
972 
973 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
974 {
975     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
976     s->io_buffer_size = 0;
977     s->dma_cmd = dma_cmd;
978 
979     switch (dma_cmd) {
980     case IDE_DMA_READ:
981         block_acct_start(blk_get_stats(s->blk), &s->acct,
982                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
983         break;
984     case IDE_DMA_WRITE:
985         block_acct_start(blk_get_stats(s->blk), &s->acct,
986                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
987         break;
988     default:
989         break;
990     }
991 
992     ide_start_dma(s, ide_dma_cb);
993 }
994 
/*
 * Arm the controller's DMA engine (when it provides a start_dma hook)
 * with @cb as the completion callback, after recording retry state so
 * a failed transfer can be restarted.
 */
void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
    s->io_buffer_index = 0;
    ide_set_retry(s);
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
    }
}
1003 
1004 static void ide_sector_write(IDEState *s);
1005 
/* Timer callback for the win2k_install_hack path in
 * ide_sector_write_cb(): raises the deliberately-delayed write IRQ. */
static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;
    ide_set_irq(s->bus);
}
1011 
/*
 * Completion callback for a PIO sector write issued by
 * ide_sector_write().  Finishes accounting, advances the sector
 * position, and either stops the transfer or arms the next DRQ data
 * phase for the remaining sectors.
 */
static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        /* Honour the configured werror policy; bail out unless it
         * tells us to ignore the error. */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* At most req_nb_sectors are transferred per DRQ block. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}
1060 
1061 static void ide_sector_write(IDEState *s)
1062 {
1063     int64_t sector_num;
1064     int n;
1065 
1066     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1067     sector_num = ide_get_sector(s);
1068 
1069     n = s->nsector;
1070     if (n > s->req_nb_sectors) {
1071         n = s->req_nb_sectors;
1072     }
1073 
1074     trace_ide_sector_write(sector_num, n);
1075 
1076     if (!ide_sect_range_ok(s, sector_num, n)) {
1077         ide_rw_error(s);
1078         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1079         return;
1080     }
1081 
1082     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1083 
1084     block_acct_start(blk_get_stats(s->blk), &s->acct,
1085                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1086     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1087                                    &s->qiov, 0, ide_sector_write_cb, s);
1088 }
1089 
/*
 * Completion callback for FLUSH CACHE.  Applies the error policy on
 * failure, finishes accounting and signals command completion.
 */
static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    /* s->blk may be NULL: ide_flush_cache() calls us directly when the
     * drive has no backend. */
    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}
1110 
1111 static void ide_flush_cache(IDEState *s)
1112 {
1113     if (s->blk == NULL) {
1114         ide_flush_cb(s, 0);
1115         return;
1116     }
1117 
1118     s->status |= BUSY_STAT;
1119     ide_set_retry(s);
1120     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1121     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1122 }
1123 
1124 static void ide_cfata_metadata_inquiry(IDEState *s)
1125 {
1126     uint16_t *p;
1127     uint32_t spd;
1128 
1129     p = (uint16_t *) s->io_buffer;
1130     memset(p, 0, 0x200);
1131     spd = ((s->mdata_size - 1) >> 9) + 1;
1132 
1133     put_le16(p + 0, 0x0001);			/* Data format revision */
1134     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1135     put_le16(p + 2, s->media_changed);		/* Media status */
1136     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1137     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1138     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1139     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1140 }
1141 
/*
 * CFA read-metadata-storage: copy up to one sector (minus the 2-byte
 * status header) of metadata into the I/O buffer.  The byte offset
 * into the metadata area comes from the hcyl/lcyl registers, in units
 * of 512 bytes.
 *
 * NOTE(review): ((s->hcyl << 16) | s->lcyl) << 9 is evaluated in
 * (signed) int arithmetic and can overflow for large hcyl values —
 * confirm the reachable register range before relying on this bound
 * check.
 */
static void ide_cfata_metadata_read(IDEState *s)
{
    uint16_t *p;

    /* Reject offsets past the end of the metadata area. */
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);

    put_le16(p + 0, s->media_changed);		/* Media status */
    /* Copy the smallest of: remaining metadata, requested sectors,
     * and one sector minus the status word. */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
                    MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                                    s->nsector << 9), 0x200 - 2));
}
1160 
/*
 * CFA write-metadata-storage: copy up to one sector (minus the 2-byte
 * header) from the I/O buffer into the metadata area at the offset
 * given by hcyl/lcyl (units of 512 bytes).  Clears media_changed.
 *
 * NOTE(review): same potential signed-overflow in the offset
 * expression as in ide_cfata_metadata_read() — verify register range.
 */
static void ide_cfata_metadata_write(IDEState *s)
{
    /* Reject offsets past the end of the metadata area. */
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    s->media_changed = 0;

    /* Copy the smallest of: remaining metadata, requested sectors,
     * and one sector minus the status word. */
    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
                    s->io_buffer + 2,
                    MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                                    s->nsector << 9), 0x200 - 2));
}
1176 
/* Called when the inserted state of the media has changed.
 * @load is true when a medium was inserted, false on removal. */
static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    s->tray_open = !load;
    /* Refresh the cached medium geometry. */
    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /*
     * First indicate to the guest that a CD has been removed.  That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive.  See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;
    ide_set_irq(s->bus);
}
1199 
/* Request that the guest release the medium; with @force the tray
 * lock is overridden so a subsequent eject cannot be refused. */
static void ide_cd_eject_request_cb(void *opaque, bool force)
{
    IDEState *s = opaque;

    s->events.eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
    ide_set_irq(s->bus);
}
1210 
1211 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1212 {
1213     s->lba48 = lba48;
1214 
1215     /* handle the 'magic' 0 nsector count conversion here. to avoid
1216      * fiddling with the rest of the read logic, we just store the
1217      * full sector count in ->nsector and ignore ->hob_nsector from now
1218      */
1219     if (!s->lba48) {
1220         if (!s->nsector)
1221             s->nsector = 256;
1222     } else {
1223         if (!s->nsector && !s->hob_nsector)
1224             s->nsector = 65536;
1225         else {
1226             int lo = s->nsector;
1227             int hi = s->hob_nsector;
1228 
1229             s->nsector = (hi << 8) | lo;
1230         }
1231     }
1232 }
1233 
/* Clear the HOB (High Order Byte) select bit; per ATA, any write to a
 * command-block register does this. */
static void ide_clear_hob(IDEBus *bus)
{
    /* any write clears HOB high bit of device control register */
    bus->cmd &= ~(IDE_CTRL_HOB);
}
1239 
/* IOport [W]rite [R]egisters */
/* Write-side command-block register offsets (addr & 7); values must
 * match the hardware register layout used by ide_ioport_write(). */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1252 
/* Human-readable register names indexed by enum ATA_IOPORT_WR; used
 * by the ide_ioport_write tracepoint. */
const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
    [ATA_IOPORT_WR_DATA] = "Data",
    [ATA_IOPORT_WR_FEATURES] = "Features",
    [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_WR_COMMAND] = "Command"
};
1263 
1264 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1265 {
1266     IDEBus *bus = opaque;
1267     IDEState *s = idebus_active_if(bus);
1268     int reg_num = addr & 7;
1269 
1270     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1271 
1272     /* ignore writes to command block while busy with previous command */
1273     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1274         return;
1275     }
1276 
1277     /* NOTE: Device0 and Device1 both receive incoming register writes.
1278      * (They're on the same bus! They have to!) */
1279 
1280     switch (reg_num) {
1281     case 0:
1282         break;
1283     case ATA_IOPORT_WR_FEATURES:
1284         ide_clear_hob(bus);
1285         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1286         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1287         bus->ifs[0].feature = val;
1288         bus->ifs[1].feature = val;
1289         break;
1290     case ATA_IOPORT_WR_SECTOR_COUNT:
1291         ide_clear_hob(bus);
1292         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1293         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1294         bus->ifs[0].nsector = val;
1295         bus->ifs[1].nsector = val;
1296         break;
1297     case ATA_IOPORT_WR_SECTOR_NUMBER:
1298         ide_clear_hob(bus);
1299         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1300         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1301         bus->ifs[0].sector = val;
1302         bus->ifs[1].sector = val;
1303         break;
1304     case ATA_IOPORT_WR_CYLINDER_LOW:
1305         ide_clear_hob(bus);
1306         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1307         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1308         bus->ifs[0].lcyl = val;
1309         bus->ifs[1].lcyl = val;
1310         break;
1311     case ATA_IOPORT_WR_CYLINDER_HIGH:
1312         ide_clear_hob(bus);
1313         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1314         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1315         bus->ifs[0].hcyl = val;
1316         bus->ifs[1].hcyl = val;
1317         break;
1318     case ATA_IOPORT_WR_DEVICE_HEAD:
1319         ide_clear_hob(bus);
1320         bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1321         bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1322         /* select drive */
1323         bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1324         break;
1325     default:
1326     case ATA_IOPORT_WR_COMMAND:
1327         ide_clear_hob(bus);
1328         qemu_irq_lower(bus->irq);
1329         ide_exec_cmd(bus, val);
1330         break;
1331     }
1332 }
1333 
/*
 * Soft-reset one drive interface: cancel in-flight PIO, optionally
 * restore the default CHS geometry (SET FEATURES 0xCC arms that), and
 * return all taskfile, ATAPI and DMA state to power-on defaults.
 */
static void ide_reset(IDEState *s)
{
    trace_ide_reset(s);

    /* Cancel any PIO request still in flight before touching state. */
    if (s->pio_aiocb) {
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    /* Set by cmd_set_features() (sub-feature 0xcc): revert geometry to
     * the drive defaults on reset. */
    if (s->reset_reverts) {
        s->reset_reverts = false;
        s->heads         = s->drive_heads;
        s->sectors       = s->drive_sectors;
    }
    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = (ATA_DEV_ALWAYS_ON);
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}
1394 
/* Handler for commands that need no action; returning true lets the
 * caller update the status register as usual. */
static bool cmd_nop(IDEState *s, uint8_t cmd)
{
    return true;
}
1399 
/* DEVICE RESET: stop all ongoing transfers and restore power-on
 * state, leaving the status register cleared per the spec. */
static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    /* Halt PIO (in the DRQ phase), then DMA */
    ide_transfer_halt(s);
    ide_cancel_dma_sync(s);

    /* Reset any PIO commands, reset signature, etc */
    ide_reset(s);

    /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
     * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
    s->status = 0x00;

    /* Do not overwrite status register */
    return false;
}
1416 
1417 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1418 {
1419     switch (s->feature) {
1420     case DSM_TRIM:
1421         if (s->blk) {
1422             ide_sector_start_dma(s, IDE_DMA_TRIM);
1423             return false;
1424         }
1425         break;
1426     }
1427 
1428     ide_abort_command(s);
1429     return true;
1430 }
1431 
1432 static bool cmd_identify(IDEState *s, uint8_t cmd)
1433 {
1434     if (s->blk && s->drive_kind != IDE_CD) {
1435         if (s->drive_kind != IDE_CFATA) {
1436             ide_identify(s);
1437         } else {
1438             ide_cfata_identify(s);
1439         }
1440         s->status = READY_STAT | SEEK_STAT;
1441         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1442         ide_set_irq(s->bus);
1443         return false;
1444     } else {
1445         if (s->drive_kind == IDE_CD) {
1446             ide_set_signature(s);
1447         }
1448         ide_abort_command(s);
1449     }
1450 
1451     return true;
1452 }
1453 
/* READ VERIFY SECTOR(S): no data is read or checked here, only the
 * sector-count conversion is applied before reporting success. */
static bool cmd_verify(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_VERIFY_EXT);

    /* do sector number check ? */
    ide_cmd_lba48_transform(s, lba48);

    return true;
}
1463 
1464 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1465 {
1466     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1467         /* Disable Read and Write Multiple */
1468         s->mult_sectors = 0;
1469     } else if ((s->nsector & 0xff) != 0 &&
1470         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1471          (s->nsector & (s->nsector - 1)) != 0)) {
1472         ide_abort_command(s);
1473     } else {
1474         s->mult_sectors = s->nsector & 0xff;
1475     }
1476 
1477     return true;
1478 }
1479 
1480 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1481 {
1482     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1483 
1484     if (!s->blk || !s->mult_sectors) {
1485         ide_abort_command(s);
1486         return true;
1487     }
1488 
1489     ide_cmd_lba48_transform(s, lba48);
1490     s->req_nb_sectors = s->mult_sectors;
1491     ide_sector_read(s);
1492     return false;
1493 }
1494 
1495 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1496 {
1497     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1498     int n;
1499 
1500     if (!s->blk || !s->mult_sectors) {
1501         ide_abort_command(s);
1502         return true;
1503     }
1504 
1505     ide_cmd_lba48_transform(s, lba48);
1506 
1507     s->req_nb_sectors = s->mult_sectors;
1508     n = MIN(s->nsector, s->req_nb_sectors);
1509 
1510     s->status = SEEK_STAT | READY_STAT;
1511     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1512 
1513     s->media_changed = 1;
1514 
1515     return false;
1516 }
1517 
1518 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1519 {
1520     bool lba48 = (cmd == WIN_READ_EXT);
1521 
1522     if (s->drive_kind == IDE_CD) {
1523         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1524         ide_abort_command(s);
1525         return true;
1526     }
1527 
1528     if (!s->blk) {
1529         ide_abort_command(s);
1530         return true;
1531     }
1532 
1533     ide_cmd_lba48_transform(s, lba48);
1534     s->req_nb_sectors = 1;
1535     ide_sector_read(s);
1536 
1537     return false;
1538 }
1539 
1540 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1541 {
1542     bool lba48 = (cmd == WIN_WRITE_EXT);
1543 
1544     if (!s->blk) {
1545         ide_abort_command(s);
1546         return true;
1547     }
1548 
1549     ide_cmd_lba48_transform(s, lba48);
1550 
1551     s->req_nb_sectors = 1;
1552     s->status = SEEK_STAT | READY_STAT;
1553     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1554 
1555     s->media_changed = 1;
1556 
1557     return false;
1558 }
1559 
1560 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1561 {
1562     bool lba48 = (cmd == WIN_READDMA_EXT);
1563 
1564     if (!s->blk) {
1565         ide_abort_command(s);
1566         return true;
1567     }
1568 
1569     ide_cmd_lba48_transform(s, lba48);
1570     ide_sector_start_dma(s, IDE_DMA_READ);
1571 
1572     return false;
1573 }
1574 
1575 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1576 {
1577     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1578 
1579     if (!s->blk) {
1580         ide_abort_command(s);
1581         return true;
1582     }
1583 
1584     ide_cmd_lba48_transform(s, lba48);
1585     ide_sector_start_dma(s, IDE_DMA_WRITE);
1586 
1587     s->media_changed = 1;
1588 
1589     return false;
1590 }
1591 
/* FLUSH CACHE(_EXT): asynchronous; completion is signalled from
 * ide_flush_cb(), so the status register is not updated here. */
static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
{
    ide_flush_cache(s);
    return false;
}
1597 
/* SEEK: treated as a no-op success since there is no physical head. */
static bool cmd_seek(IDEState *s, uint8_t cmd)
{
    /* XXX: Check that seek is within bounds */
    return true;
}
1603 
1604 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1605 {
1606     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1607 
1608     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1609     if (s->nb_sectors == 0) {
1610         ide_abort_command(s);
1611         return true;
1612     }
1613 
1614     ide_cmd_lba48_transform(s, lba48);
1615     ide_set_sector(s, s->nb_sectors - 1);
1616 
1617     return true;
1618 }
1619 
/* CHECK POWER MODE: the drive is always reported active/idle. */
static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
{
    s->nsector = 0xff; /* device active or idle */
    return true;
}
1625 
1626 /* INITIALIZE DEVICE PARAMETERS */
1627 static bool cmd_specify(IDEState *s, uint8_t cmd)
1628 {
1629     if (s->blk && s->drive_kind != IDE_CD) {
1630         s->heads = (s->select & (ATA_DEV_HS)) + 1;
1631         s->sectors = s->nsector;
1632         ide_set_irq(s->bus);
1633     } else {
1634         ide_abort_command(s);
1635     }
1636 
1637     return true;
1638 }
1639 
/*
 * SET FEATURES: dispatch on the Features register.  Write-cache and
 * transfer-mode sub-commands also patch the cached IDENTIFY data so
 * subsequent IDENTIFY commands reflect the new state; unrecognised
 * sub-features abort the command.
 */
static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        /* Flush anything already cached; completion comes via
         * ide_flush_cb(), hence return false. */
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
        s->reset_reverts = true;
        return true;
    case 0x66: /* reverting to power-on defaults disable */
        s->reset_reverts = false;
        return true;
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        return true;
    case 0x03: /* set transfer mode */
        {
            /* Low 3 bits of the count select the mode number, the
             * upper bits the mode class. */
            uint8_t val = s->nsector & 0x07;
            identify_data = (uint16_t *)s->identify_data;

            switch (s->nsector >> 3) {
            case 0x00: /* pio default */
            case 0x01: /* pio mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x02: /* single word dma mode */
                put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x04: /* mdma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x08: /* udma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
                break;
            default:
                goto abort_cmd;
            }
            return true;
        }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
1717 
1718 
1719 /*** ATAPI commands ***/
1720 
/* IDENTIFY PACKET DEVICE: 512-byte PIO transfer of the ATAPI
 * identify data. */
static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
{
    ide_atapi_identify(s);
    s->status = READY_STAT | SEEK_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
    ide_set_irq(s->bus);
    return false;
}
1729 
/* EXECUTE DEVICE DIAGNOSTIC: always reports a passing self-test via
 * the signature and error registers. */
static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    /*
     * Clear the device register per the ATA (v6) specification,
     * because ide_set_signature does not clear LBA or drive bits.
     */
    s->select = (ATA_DEV_ALWAYS_ON);
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
        ide_set_irq(s->bus);
    }

    return false;
}
1756 
/*
 * ATAPI PACKET: arm a PIO transfer to receive the command packet; the
 * packet is parsed by ide_atapi_cmd() once the guest has written it.
 */
static bool cmd_packet(IDEState *s, uint8_t cmd)
{
    /* overlapping commands not supported */
    if (s->feature & 0x02) {
        ide_abort_command(s);
        return true;
    }

    s->status = READY_STAT | SEEK_STAT;
    /* Bit 0 of the Features register selects DMA for the data phase. */
    s->atapi_dma = s->feature & 1;
    if (s->atapi_dma) {
        s->dma_cmd = IDE_DMA_ATAPI;
    }
    s->nsector = 1;
    ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
                       ide_atapi_cmd);
    return false;
}
1775 
1776 
1777 /*** CF-ATA commands ***/
1778 
/* CFA REQUEST EXTENDED ERROR CODE: always reports the generic
 * "miscellaneous error" code. */
static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
{
    s->error = 0x09;    /* miscellaneous error */
    s->status = READY_STAT | SEEK_STAT;
    ide_set_irq(s->bus);

    return false;
}
1787 
1788 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1789 {
1790     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1791      * required for Windows 8 to work with AHCI */
1792 
1793     if (cmd == CFA_WEAR_LEVEL) {
1794         s->nsector = 0;
1795     }
1796 
1797     if (cmd == CFA_ERASE_SECTORS) {
1798         s->media_changed = 1;
1799     }
1800 
1801     return true;
1802 }
1803 
/* CFA TRANSLATE SECTOR: return a 512-byte block describing the
 * currently addressed sector; erase flag and hot count are fixed. */
static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}
1826 
1827 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1828 {
1829     switch (s->feature) {
1830     case 0x02:  /* Inquiry Metadata Storage */
1831         ide_cfata_metadata_inquiry(s);
1832         break;
1833     case 0x03:  /* Read Metadata Storage */
1834         ide_cfata_metadata_read(s);
1835         break;
1836     case 0x04:  /* Write Metadata Storage */
1837         ide_cfata_metadata_write(s);
1838         break;
1839     default:
1840         ide_abort_command(s);
1841         return true;
1842     }
1843 
1844     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1845     s->status = 0x00; /* NOTE: READY is _not_ set */
1846     ide_set_irq(s->bus);
1847 
1848     return false;
1849 }
1850 
1851 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1852 {
1853     switch (s->feature) {
1854     case 0x01:  /* sense temperature in device */
1855         s->nsector = 0x50;      /* +20 C */
1856         break;
1857     default:
1858         ide_abort_command(s);
1859         return true;
1860     }
1861 
1862     return true;
1863 }
1864 
1865 
1866 /*** SMART commands ***/
1867 
/*
 * WIN_SMART (0xB0): SMART feature set.  The sub-command is taken from
 * the Features register; all sub-commands require the 0x4F/0xC2 key in
 * the LBA mid/high (lcyl/hcyl) registers.  Returns true when the
 * command completed synchronously, false when a PIO data transfer to
 * the guest has been started instead.
 */
static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    /* Reject commands without the mandatory SMART signature. */
    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    /* Except for SMART ENABLE itself, SMART must already be enabled. */
    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        /* Sector Number selects the autosave state (0xF1 = enable). */
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        /* Health is reported through the cylinder registers: the key
         * unchanged means OK, the swapped key signals a failure. */
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        /* Build the 512-byte attribute threshold sector. */
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        /* Each attribute entry is 12 bytes: id followed by threshold. */
        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        /* Build the 512-byte SMART data sector from the attribute
         * table plus fixed capability/poll-time fields. */
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        /* Offline collection status; bit 7 mirrors autosave state. */
        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            /* Status byte of the most recent self-test log entry. */
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                           (s->smart_selftest_count - 1) *
                           24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        /* checksum over bytes 0..510, stored in the final byte */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        /* Sector Number selects which log page to read. */
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++)  {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        /* Self-tests "complete" instantly; only the log is updated. */
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            s->smart_selftest_count++;
            /* The log holds at most 21 entries; wrap around after that. */
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            }
            /* Each log entry is 24 bytes, starting at offset 2. */
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

    /* Unknown sub-commands fall through to here. */
abort_cmd:
    ide_abort_command(s);
    return true;
}
2037 
/* Per-command permission bits: which drive kinds accept the command.
 * The bit position matches the IDEDriveKind enum value. */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)

/* See ACS-2 T13/2015-D Table B.2 Command codes */
/* Dispatch table indexed by the ATA command byte.  Entries left zero
 * ({NULL, 0}) are unsupported commands; ide_cmd_permitted() rejects
 * them because no drive-kind bit is set in flags. */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_specify, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
};
2111 
2112 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2113 {
2114     return cmd < ARRAY_SIZE(ide_cmd_table)
2115         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2116 }
2117 
/*
 * Execute the ATA command byte @val written to the Command register of
 * @bus's currently selected device.  Dispatches through ide_cmd_table;
 * synchronous commands are completed here (status/DSC/IRQ), while
 * asynchronous ones finish later via their own completion paths.
 */
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

    s = idebus_active_if(bus);
    trace_ide_exec_cmd(bus, s, val);

    /* ignore commands to non existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only RESET is allowed while BSY and/or DRQ are set,
     * and only to ATAPI devices. */
    if (s->status & (BUSY_STAT|DRQ_STAT)) {
        if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
            return;
        }
    }

    /* Unsupported commands, or commands not valid for this drive
     * kind, fail with ABRT. */
    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    /* Handler returns true when the command completed synchronously. */
    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;
        /* ERR in status and a nonzero error register must agree. */
        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}
2162 
2163 /* IOport [R]ead [R]egisters */
/* IOport [R]ead [R]egisters */
/* Offsets of the read-side task-file registers within the 8-byte
 * command block; the value is the low three bits of the I/O address. */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2175 
/* Human-readable register names, used by the trace events below. */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
2186 
/*
 * Read one of the eight task-file registers of the currently selected
 * device.  Most registers read as 0 when the bus has no media-backed
 * device; with IDE_CTRL_HOB set, the "previous content" (hob_*) shadow
 * registers of the 48-bit feature set are returned instead.
 */
uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t reg_num;
    int ret, hob;

    reg_num = addr & 7;
    hob = bus->cmd & (IDE_CTRL_HOB);
    switch (reg_num) {
    case ATA_IOPORT_RR_DATA:
        /*
         * The pre-GRUB Solaris x86 bootloader relies upon inb
         * consuming a word from the drive's sector buffer.
         */
        ret = ide_data_readw(bus, addr) & 0xff;
        break;
    case ATA_IOPORT_RR_ERROR:
        /* Empty bus or phantom slave reads as 0. */
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_COUNT:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_NUMBER:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_LOW:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_HIGH:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case ATA_IOPORT_RR_DEVICE_HEAD:
        /* Device/Head has no HOB shadow. */
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case ATA_IOPORT_RR_STATUS:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        /* Reading the Status register acknowledges the interrupt. */
        qemu_irq_lower(bus->irq);
        break;
    }

    trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
    return ret;
}
2272 
2273 uint32_t ide_status_read(void *opaque, uint32_t addr)
2274 {
2275     IDEBus *bus = opaque;
2276     IDEState *s = idebus_active_if(bus);
2277     int ret;
2278 
2279     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2280         (s != bus->ifs && !s->blk)) {
2281         ret = 0;
2282     } else {
2283         ret = s->status;
2284     }
2285 
2286     trace_ide_status_read(addr, ret, bus, s);
2287     return ret;
2288 }
2289 
/*
 * Perform a software reset (SRST) on one device.  The ordering matters:
 * PIO state is halted first, then in-flight DMA is cancelled (which may
 * run completion callbacks), and only then are the registers reset.
 */
static void ide_perform_srst(IDEState *s)
{
    s->status |= BUSY_STAT;

    /* Halt PIO (Via register state); PIO BH remains scheduled. */
    ide_transfer_halt(s);

    /* Cancel DMA -- may drain block device and invoke callbacks */
    ide_cancel_dma_sync(s);

    /* Cancel PIO callback, reset registers/signature, etc */
    ide_reset(s);

    /* perform diagnostic */
    cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
}
2306 
2307 static void ide_bus_perform_srst(void *opaque)
2308 {
2309     IDEBus *bus = opaque;
2310     IDEState *s;
2311     int i;
2312 
2313     for (i = 0; i < 2; i++) {
2314         s = &bus->ifs[i];
2315         ide_perform_srst(s);
2316     }
2317 
2318     bus->cmd &= ~IDE_CTRL_RESET;
2319 }
2320 
2321 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2322 {
2323     IDEBus *bus = opaque;
2324     IDEState *s;
2325     int i;
2326 
2327     trace_ide_ctrl_write(addr, val, bus);
2328 
2329     /* Device0 and Device1 each have their own control register,
2330      * but QEMU models it as just one register in the controller. */
2331     if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2332         for (i = 0; i < 2; i++) {
2333             s = &bus->ifs[i];
2334             s->status |= BUSY_STAT;
2335         }
2336         replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2337                                          ide_bus_perform_srst, bus);
2338     }
2339 
2340     bus->cmd = val;
2341 }
2342 
2343 /*
2344  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2345  * transferred from the device to the guest), false if it's a PIO in
2346  */
2347 static bool ide_is_pio_out(IDEState *s)
2348 {
2349     if (s->end_transfer_func == ide_sector_write ||
2350         s->end_transfer_func == ide_atapi_cmd) {
2351         return false;
2352     } else if (s->end_transfer_func == ide_sector_read ||
2353                s->end_transfer_func == ide_transfer_stop ||
2354                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2355                s->end_transfer_func == ide_dummy_transfer_stop) {
2356         return true;
2357     }
2358 
2359     abort();
2360 }
2361 
2362 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2363 {
2364     IDEBus *bus = opaque;
2365     IDEState *s = idebus_active_if(bus);
2366     uint8_t *p;
2367 
2368     trace_ide_data_writew(addr, val, bus, s);
2369 
2370     /* PIO data access allowed only when DRQ bit is set. The result of a write
2371      * during PIO out is indeterminate, just ignore it. */
2372     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2373         return;
2374     }
2375 
2376     p = s->data_ptr;
2377     if (p + 2 > s->data_end) {
2378         return;
2379     }
2380 
2381     *(uint16_t *)p = le16_to_cpu(val);
2382     p += 2;
2383     s->data_ptr = p;
2384     if (p >= s->data_end) {
2385         s->status &= ~DRQ_STAT;
2386         s->end_transfer_func(s);
2387     }
2388 }
2389 
2390 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2391 {
2392     IDEBus *bus = opaque;
2393     IDEState *s = idebus_active_if(bus);
2394     uint8_t *p;
2395     int ret;
2396 
2397     /* PIO data access allowed only when DRQ bit is set. The result of a read
2398      * during PIO in is indeterminate, return 0 and don't move forward. */
2399     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2400         return 0;
2401     }
2402 
2403     p = s->data_ptr;
2404     if (p + 2 > s->data_end) {
2405         return 0;
2406     }
2407 
2408     ret = cpu_to_le16(*(uint16_t *)p);
2409     p += 2;
2410     s->data_ptr = p;
2411     if (p >= s->data_end) {
2412         s->status &= ~DRQ_STAT;
2413         s->end_transfer_func(s);
2414     }
2415 
2416     trace_ide_data_readw(addr, ret, bus, s);
2417     return ret;
2418 }
2419 
2420 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2421 {
2422     IDEBus *bus = opaque;
2423     IDEState *s = idebus_active_if(bus);
2424     uint8_t *p;
2425 
2426     trace_ide_data_writel(addr, val, bus, s);
2427 
2428     /* PIO data access allowed only when DRQ bit is set. The result of a write
2429      * during PIO out is indeterminate, just ignore it. */
2430     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2431         return;
2432     }
2433 
2434     p = s->data_ptr;
2435     if (p + 4 > s->data_end) {
2436         return;
2437     }
2438 
2439     *(uint32_t *)p = le32_to_cpu(val);
2440     p += 4;
2441     s->data_ptr = p;
2442     if (p >= s->data_end) {
2443         s->status &= ~DRQ_STAT;
2444         s->end_transfer_func(s);
2445     }
2446 }
2447 
2448 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2449 {
2450     IDEBus *bus = opaque;
2451     IDEState *s = idebus_active_if(bus);
2452     uint8_t *p;
2453     int ret;
2454 
2455     /* PIO data access allowed only when DRQ bit is set. The result of a read
2456      * during PIO in is indeterminate, return 0 and don't move forward. */
2457     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2458         ret = 0;
2459         goto out;
2460     }
2461 
2462     p = s->data_ptr;
2463     if (p + 4 > s->data_end) {
2464         return 0;
2465     }
2466 
2467     ret = cpu_to_le32(*(uint32_t *)p);
2468     p += 4;
2469     s->data_ptr = p;
2470     if (p >= s->data_end) {
2471         s->status &= ~DRQ_STAT;
2472         s->end_transfer_func(s);
2473     }
2474 
2475 out:
2476     trace_ide_data_readl(addr, ret, bus, s);
2477     return ret;
2478 }
2479 
2480 static void ide_dummy_transfer_stop(IDEState *s)
2481 {
2482     s->data_ptr = s->io_buffer;
2483     s->data_end = s->io_buffer;
2484     s->io_buffer[0] = 0xff;
2485     s->io_buffer[1] = 0xff;
2486     s->io_buffer[2] = 0xff;
2487     s->io_buffer[3] = 0xff;
2488 }
2489 
2490 void ide_bus_reset(IDEBus *bus)
2491 {
2492     bus->unit = 0;
2493     bus->cmd = 0;
2494     ide_reset(&bus->ifs[0]);
2495     ide_reset(&bus->ifs[1]);
2496     ide_clear_hob(bus);
2497 
2498     /* pending async DMA */
2499     if (bus->dma->aiocb) {
2500         trace_ide_bus_reset_aio();
2501         blk_aio_cancel(bus->dma->aiocb);
2502         bus->dma->aiocb = NULL;
2503     }
2504 
2505     /* reset dma provider too */
2506     if (bus->dma->ops->reset) {
2507         bus->dma->ops->reset(bus->dma);
2508     }
2509 }
2510 
2511 static bool ide_cd_is_tray_open(void *opaque)
2512 {
2513     return ((IDEState *)opaque)->tray_open;
2514 }
2515 
2516 static bool ide_cd_is_medium_locked(void *opaque)
2517 {
2518     return ((IDEState *)opaque)->tray_locked;
2519 }
2520 
2521 static void ide_resize_cb(void *opaque)
2522 {
2523     IDEState *s = opaque;
2524     uint64_t nb_sectors;
2525 
2526     if (!s->identify_set) {
2527         return;
2528     }
2529 
2530     blk_get_geometry(s->blk, &nb_sectors);
2531     s->nb_sectors = nb_sectors;
2532 
2533     /* Update the identify data buffer. */
2534     if (s->drive_kind == IDE_CFATA) {
2535         ide_cfata_identify_size(s);
2536     } else {
2537         /* IDE_CD uses a different set of callbacks entirely. */
2538         assert(s->drive_kind != IDE_CD);
2539         ide_identify_size(s);
2540     }
2541 }
2542 
/* Backend callbacks for CD-ROM drives: media change/eject and tray
 * state queries. */
static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};
2549 
/* Backend callbacks for disk-like drives: only resize notification. */
static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
2553 
2554 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2555                    const char *version, const char *serial, const char *model,
2556                    uint64_t wwn,
2557                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2558                    int chs_trans, Error **errp)
2559 {
2560     uint64_t nb_sectors;
2561 
2562     s->blk = blk;
2563     s->drive_kind = kind;
2564 
2565     blk_get_geometry(blk, &nb_sectors);
2566     s->cylinders = cylinders;
2567     s->heads = s->drive_heads = heads;
2568     s->sectors = s->drive_sectors = secs;
2569     s->chs_trans = chs_trans;
2570     s->nb_sectors = nb_sectors;
2571     s->wwn = wwn;
2572     /* The SMART values should be preserved across power cycles
2573        but they aren't.  */
2574     s->smart_enabled = 1;
2575     s->smart_autosave = 1;
2576     s->smart_errors = 0;
2577     s->smart_selftest_count = 0;
2578     if (kind == IDE_CD) {
2579         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2580     } else {
2581         if (!blk_is_inserted(s->blk)) {
2582             error_setg(errp, "Device needs media, but drive is empty");
2583             return -1;
2584         }
2585         if (!blk_is_writable(blk)) {
2586             error_setg(errp, "Can't use a read-only drive");
2587             return -1;
2588         }
2589         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2590     }
2591     if (serial) {
2592         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2593     } else {
2594         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2595                  "QM%05d", s->drive_serial);
2596     }
2597     if (model) {
2598         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2599     } else {
2600         switch (kind) {
2601         case IDE_CD:
2602             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2603             break;
2604         case IDE_CFATA:
2605             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2606             break;
2607         default:
2608             strcpy(s->drive_model_str, "QEMU HARDDISK");
2609             break;
2610         }
2611     }
2612 
2613     if (version) {
2614         pstrcpy(s->version, sizeof(s->version), version);
2615     } else {
2616         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2617     }
2618 
2619     ide_reset(s);
2620     blk_iostatus_enable(blk);
2621     return 0;
2622 }
2623 
/*
 * One-time initialization of device slot @unit on @bus: assign a
 * serial number and allocate the PIO/DMA buffers and PIO write timer.
 */
static void ide_init1(IDEBus *bus, int unit)
{
    /* Monotonic default serial across all drives in this process. */
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           ide_sector_write_timer_cb, s);
}
2643 
/* No-op rw_buf implementation for the placeholder DMA provider. */
static int ide_nop_int(const IDEDMA *dma, bool is_write)
{
    return 0;
}
2648 
/* No-op restart_dma implementation for the placeholder DMA provider. */
static void ide_nop(const IDEDMA *dma)
{
}
2652 
/* No-op prepare_buf implementation for the placeholder DMA provider. */
static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
{
    return 0;
}
2657 
/* Callback table for the placeholder DMA provider used until an HBA
 * installs a real one. */
static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf    = ide_nop_int32,
    .restart_dma    = ide_nop,
    .rw_buf         = ide_nop_int,
};
2663 
/*
 * Re-issue a DMA request after migration or an I/O error, restoring
 * the unit/sector/count snapshot saved on the bus when the request
 * was first submitted.
 */
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
2674 
/*
 * Bottom half that resubmits the request recorded in bus->error_status
 * after the VM resumes.  Dispatches on the retry-type bits: HBA kick,
 * DMA (read/write/trim), PIO (read/write), flush, or ATAPI DMA.
 */
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        /* error_status was nonzero but matched no known retry type. */
        abort();
    }
}
2724 
2725 static void ide_restart_cb(void *opaque, bool running, RunState state)
2726 {
2727     IDEBus *bus = opaque;
2728 
2729     if (!running)
2730         return;
2731 
2732     if (!bus->bh) {
2733         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2734         qemu_bh_schedule(bus->bh);
2735     }
2736 }
2737 
2738 void ide_register_restart_cb(IDEBus *bus)
2739 {
2740     if (bus->dma->ops->restart_dma) {
2741         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2742     }
2743 }
2744 
/* Shared placeholder DMA provider installed by ide_init2(). */
static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
2749 
2750 void ide_init2(IDEBus *bus, qemu_irq irq)
2751 {
2752     int i;
2753 
2754     for(i = 0; i < 2; i++) {
2755         ide_init1(bus, i);
2756         ide_reset(&bus->ifs[i]);
2757     }
2758     bus->irq = irq;
2759     bus->dma = &ide_dma_nop;
2760 }
2761 
2762 void ide_exit(IDEState *s)
2763 {
2764     timer_free(s->sector_write_timer);
2765     qemu_vfree(s->smart_selftest_data);
2766     qemu_vfree(s->io_buffer);
2767 }
2768 
2769 static bool is_identify_set(void *opaque, int version_id)
2770 {
2771     IDEState *s = opaque;
2772 
2773     return s->identify_set != 0;
2774 }
2775 
/* End-of-transfer callbacks by migration index.  The ORDER of this
 * table is migration ABI: the index is what gets saved/restored (see
 * ide_drive_pio_pre_save/post_load), so entries must never be
 * reordered or removed. */
static EndTransferFunc* transfer_end_table[] = {
        ide_sector_read,
        ide_sector_write,
        ide_transfer_stop,
        ide_atapi_cmd_reply_end,
        ide_atapi_cmd,
        ide_dummy_transfer_stop,
};
2784 
2785 static int transfer_end_table_idx(EndTransferFunc *fn)
2786 {
2787     int i;
2788 
2789     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2790         if (transfer_end_table[i] == fn)
2791             return i;
2792 
2793     return -1;
2794 }
2795 
/*
 * Post-load hook for the main drive vmstate: re-derive the backend
 * write-cache setting from word 85 bit 5 of the migrated IDENTIFY
 * data.  Always succeeds.
 */
static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}
2805 
/*
 * Post-load hook for the PIO subsection: rebuild the buffer pointers
 * and the end-of-transfer callback from the migrated index.  Returns
 * -EINVAL if the incoming index is out of range (untrusted input).
 */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
2820 
/*
 * Pre-save hook for in-flight PIO state: convert the data pointers into
 * offsets relative to io_buffer, and the end-of-transfer callback into an
 * index into transfer_end_table, so both survive migration.
 */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        /* Unknown callback: warn and fall back to a safe default. */
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                        __func__);
        s->end_transfer_fn_idx = 2; /* index of ide_transfer_stop in table */
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
2840 
2841 static bool ide_drive_pio_state_needed(void *opaque)
2842 {
2843     IDEState *s = opaque;
2844 
2845     return ((s->status & DRQ_STAT) != 0)
2846         || (s->bus->error_status & IDE_RETRY_PIO);
2847 }
2848 
2849 static bool ide_tray_state_needed(void *opaque)
2850 {
2851     IDEState *s = opaque;
2852 
2853     return s->tray_open || s->tray_locked;
2854 }
2855 
2856 static bool ide_atapi_gesn_needed(void *opaque)
2857 {
2858     IDEState *s = opaque;
2859 
2860     return s->events.new_media || s->events.eject_request;
2861 }
2862 
2863 static bool ide_error_needed(void *opaque)
2864 {
2865     IDEBus *bus = opaque;
2866 
2867     return (bus->error_status != 0);
2868 }
2869 
2870 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2871 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2872     .name ="ide_drive/atapi/gesn_state",
2873     .version_id = 1,
2874     .minimum_version_id = 1,
2875     .needed = ide_atapi_gesn_needed,
2876     .fields = (VMStateField[]) {
2877         VMSTATE_BOOL(events.new_media, IDEState),
2878         VMSTATE_BOOL(events.eject_request, IDEState),
2879         VMSTATE_END_OF_LIST()
2880     }
2881 };
2882 
/*
 * CD/DVD tray open/locked state; sent only when ide_tray_state_needed
 * reports a non-default state.  Field order is migration wire format.
 */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2894 
/*
 * In-flight PIO transfer state (io_buffer contents, data pointer offsets,
 * end-of-transfer callback index).  pre_save/post_load translate between
 * live pointers and the serializable offset/index form.  Field order is
 * migration wire format: do not reorder.
 */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        /* Variable-length buffer: io_buffer_total_len bytes of io_buffer. */
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2914 
/*
 * Main per-drive migration state: task-file registers, IDENTIFY data and
 * ATAPI sense information.  Field order and version gating are migration
 * wire format: do not reorder; new fields need a version bump or a
 * subsection.
 */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        /* IDENTIFY data is only sent once the guest has populated it. */
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        /* Added in version 3; skipped when talking to older streams. */
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    /* Optional state, sent only when the matching .needed test fires. */
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
2950 
2951 static const VMStateDescription vmstate_ide_error_status = {
2952     .name ="ide_bus/error",
2953     .version_id = 2,
2954     .minimum_version_id = 1,
2955     .needed = ide_error_needed,
2956     .fields = (VMStateField[]) {
2957         VMSTATE_INT32(error_status, IDEBus),
2958         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2959         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2960         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2961         VMSTATE_END_OF_LIST()
2962     }
2963 };
2964 
/*
 * Per-bus migration state: command register and currently selected unit,
 * plus the optional error subsection.  Field order is migration wire
 * format: do not reorder.
 */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
2979 
2980 void ide_drive_get(DriveInfo **hd, int n)
2981 {
2982     int i;
2983 
2984     for (i = 0; i < n; i++) {
2985         hd[i] = drive_get_by_index(IF_IDE, i);
2986     }
2987 }
2988