xref: /openbmc/qemu/hw/ide/core.c (revision 0d04c4c9)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/blockdev.h"
34 #include "sysemu/dma.h"
35 #include "hw/block/block.h"
36 #include "sysemu/block-backend.h"
37 #include "qapi/error.h"
38 #include "qemu/cutils.h"
39 #include "sysemu/replay.h"
40 #include "sysemu/runstate.h"
41 #include "hw/ide/internal.h"
42 #include "trace.h"
43 
44 /* These values were based on a Seagate ST3500418AS but have been modified
45    to make more sense in QEMU */
46 static const int smart_attributes[][12] = {
47     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
48     /* raw read error rate */
49     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
50     /* spin up */
51     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52     /* start stop count */
53     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
54     /* remapped sectors */
55     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
56     /* power on hours */
57     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58     /* power cycle count */
59     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
60     /* airflow-temperature-celsius */
61     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
62 };
63 
64 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
65     [IDE_DMA_READ] = "DMA READ",
66     [IDE_DMA_WRITE] = "DMA WRITE",
67     [IDE_DMA_TRIM] = "DMA TRIM",
68     [IDE_DMA_ATAPI] = "DMA ATAPI"
69 };
70 
71 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
72 {
73     if ((unsigned)enval < IDE_DMA__COUNT) {
74         return IDE_DMA_CMD_lookup[enval];
75     }
76     return "DMA UNKNOWN CMD";
77 }
78 
79 static void ide_dummy_transfer_stop(IDEState *s);
80 
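/*
 * Fill an ATA IDENTIFY string field.  The spec packs string data as pairs of
 * characters in 16-bit words with the first character of each pair in the
 * upper byte, so consecutive bytes end up swapped in the little-endian
 * identify buffer; the str[i ^ 1] index below performs that swap.
 * E.g. padding "QEMU" stores the bytes 'E','Q','U','M' in memory.
 */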
81 static void padstr(char *str, const char *src, int len)
82 {
83     int i, v;
84     for(i = 0; i < len; i++) {
85         if (*src)
86             v = *src++;
87         else
88             v = ' ';
89         str[i^1] = v;
90     }
91 }
92 
93 static void put_le16(uint16_t *p, unsigned int v)
94 {
95     *p = cpu_to_le16(v);
96 }
97 
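/*
 * Update the capacity words of the IDENTIFY DEVICE data: words 60-61 hold
 * the LBA28-addressable sector count (clamped to 0x0FFFFFFF, i.e. 2^28 - 1)
 * and words 100-103 hold the full LBA48 count.  E.g. a 1 TiB disk has
 * nb_sectors = 2147483648, so words 60-61 report 268435455 while words
 * 100-103 report the real total.
 */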
98 static void ide_identify_size(IDEState *s)
99 {
100     uint16_t *p = (uint16_t *)s->identify_data;
101     int64_t nb_sectors_lba28 = s->nb_sectors;
102     if (nb_sectors_lba28 >= 1 << 28) {
103         nb_sectors_lba28 = (1 << 28) - 1;
104     }
105     put_le16(p + 60, nb_sectors_lba28);
106     put_le16(p + 61, nb_sectors_lba28 >> 16);
107     put_le16(p + 100, s->nb_sectors);
108     put_le16(p + 101, s->nb_sectors >> 16);
109     put_le16(p + 102, s->nb_sectors >> 32);
110     put_le16(p + 103, s->nb_sectors >> 48);
111 }
112 
113 static void ide_identify(IDEState *s)
114 {
115     uint16_t *p;
116     unsigned int oldsize;
117     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
118 
119     p = (uint16_t *)s->identify_data;
120     if (s->identify_set) {
121         goto fill_buffer;
122     }
123     memset(p, 0, sizeof(s->identify_data));
124 
125     put_le16(p + 0, 0x0040);
126     put_le16(p + 1, s->cylinders);
127     put_le16(p + 3, s->heads);
128     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
129     put_le16(p + 5, 512); /* XXX: retired, remove ? */
130     put_le16(p + 6, s->sectors);
131     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
132     put_le16(p + 20, 3); /* XXX: retired, remove ? */
133     put_le16(p + 21, 512); /* cache size in sectors */
134     put_le16(p + 22, 4); /* ecc bytes */
135     padstr((char *)(p + 23), s->version, 8); /* firmware version */
136     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
137 #if MAX_MULT_SECTORS > 1
138     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
139 #endif
140     put_le16(p + 48, 1); /* dword I/O */
141     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
142     put_le16(p + 51, 0x200); /* PIO transfer cycle */
143     put_le16(p + 52, 0x200); /* DMA transfer cycle */
144     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
145     put_le16(p + 54, s->cylinders);
146     put_le16(p + 55, s->heads);
147     put_le16(p + 56, s->sectors);
148     oldsize = s->cylinders * s->heads * s->sectors;
149     put_le16(p + 57, oldsize);
150     put_le16(p + 58, oldsize >> 16);
151     if (s->mult_sectors)
152         put_le16(p + 59, 0x100 | s->mult_sectors);
153     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
154     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
155     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
156     put_le16(p + 63, 0x07); /* mdma0-2 supported */
157     put_le16(p + 64, 0x03); /* pio3-4 supported */
158     put_le16(p + 65, 120);
159     put_le16(p + 66, 120);
160     put_le16(p + 67, 120);
161     put_le16(p + 68, 120);
162     if (dev && dev->conf.discard_granularity) {
163         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
164     }
165 
166     if (s->ncq_queues) {
167         put_le16(p + 75, s->ncq_queues - 1);
168         /* NCQ supported */
169         put_le16(p + 76, (1 << 8));
170     }
171 
172     put_le16(p + 80, 0xf0); /* ata4 -> ata7 supported */
173     put_le16(p + 81, 0x16); /* conforms to ata5 */
174     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
175     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
176     /* 14=set to 1, 13=flush_cache_ext, 12=flush_cache, 10=lba48 */
177     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
178     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
179     if (s->wwn) {
180         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
181     } else {
182         put_le16(p + 84, (1 << 14) | 0);
183     }
184     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
185     if (blk_enable_write_cache(s->blk)) {
186         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
187     } else {
188         put_le16(p + 85, (1 << 14) | 1);
189     }
190     /* 13=flush_cache_ext, 12=flush_cache, 10=lba48 */
191     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
192     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
193     if (s->wwn) {
194         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
195     } else {
196         put_le16(p + 87, (1 << 14) | 0);
197     }
198     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
199     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
200     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
201     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
202     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
203     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
204 
205     if (dev && dev->conf.physical_block_size)
206         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
207     if (s->wwn) {
208         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
209         put_le16(p + 108, s->wwn >> 48);
210         put_le16(p + 109, s->wwn >> 32);
211         put_le16(p + 110, s->wwn >> 16);
212         put_le16(p + 111, s->wwn);
213     }
214     if (dev && dev->conf.discard_granularity) {
215         put_le16(p + 169, 1); /* TRIM support */
216     }
217     if (dev) {
218         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
219     }
220 
221     ide_identify_size(s);
222     s->identify_set = 1;
223 
224 fill_buffer:
225     memcpy(s->io_buffer, p, sizeof(s->identify_data));
226 }
227 
228 static void ide_atapi_identify(IDEState *s)
229 {
230     uint16_t *p;
231 
232     p = (uint16_t *)s->identify_data;
233     if (s->identify_set) {
234         goto fill_buffer;
235     }
236     memset(p, 0, sizeof(s->identify_data));
237 
238     /* Removable CDROM, 50us response, 12 byte packets */
239     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
240     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
241     put_le16(p + 20, 3); /* buffer type */
242     put_le16(p + 21, 512); /* cache size in sectors */
243     put_le16(p + 22, 4); /* ecc bytes */
244     padstr((char *)(p + 23), s->version, 8); /* firmware version */
245     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
246     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
247 #ifdef USE_DMA_CDROM
248     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
249     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
250     put_le16(p + 62, 7);  /* single word dma0-2 supported */
251     put_le16(p + 63, 7);  /* mdma0-2 supported */
252 #else
253     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
254     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
255     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
256 #endif
257     put_le16(p + 64, 3); /* pio3-4 supported */
258     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
259     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
260     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
261     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
262 
263     put_le16(p + 71, 30); /* in ns */
264     put_le16(p + 72, 30); /* in ns */
265 
266     if (s->ncq_queues) {
267         put_le16(p + 75, s->ncq_queues - 1);
268         /* NCQ supported */
269         put_le16(p + 76, (1 << 8));
270     }
271 
272     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
273     if (s->wwn) {
274         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
275         put_le16(p + 87, (1 << 8)); /* WWN enabled */
276     }
277 
278 #ifdef USE_DMA_CDROM
279     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
280 #endif
281 
282     if (s->wwn) {
283         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
284         put_le16(p + 108, s->wwn >> 48);
285         put_le16(p + 109, s->wwn >> 32);
286         put_le16(p + 110, s->wwn >> 16);
287         put_le16(p + 111, s->wwn);
288     }
289 
290     s->identify_set = 1;
291 
292 fill_buffer:
293     memcpy(s->io_buffer, p, sizeof(s->identify_data));
294 }
295 
296 static void ide_cfata_identify_size(IDEState *s)
297 {
298     uint16_t *p = (uint16_t *)s->identify_data;
299     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
300     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
301     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
302     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
303 }
304 
305 static void ide_cfata_identify(IDEState *s)
306 {
307     uint16_t *p;
308     uint32_t cur_sec;
309 
310     p = (uint16_t *)s->identify_data;
311     if (s->identify_set) {
312         goto fill_buffer;
313     }
314     memset(p, 0, sizeof(s->identify_data));
315 
316     cur_sec = s->cylinders * s->heads * s->sectors;
317 
318     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
319     put_le16(p + 1, s->cylinders);		/* Default cylinders */
320     put_le16(p + 3, s->heads);			/* Default heads */
321     put_le16(p + 6, s->sectors);		/* Default sectors per track */
322     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
323     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
324     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
325     put_le16(p + 22, 0x0004);			/* ECC bytes */
326     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
327     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
328 #if MAX_MULT_SECTORS > 1
329     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
330 #else
331     put_le16(p + 47, 0x0000);
332 #endif
333     put_le16(p + 49, 0x0f00);			/* Capabilities */
334     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
335     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
336     put_le16(p + 53, 0x0003);			/* Translation params valid */
337     put_le16(p + 54, s->cylinders);		/* Current cylinders */
338     put_le16(p + 55, s->heads);			/* Current heads */
339     put_le16(p + 56, s->sectors);		/* Current sectors */
340     put_le16(p + 57, cur_sec);			/* Current capacity */
341     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
342     if (s->mult_sectors)			/* Multiple sector setting */
343         put_le16(p + 59, 0x100 | s->mult_sectors);
344     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
345     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
346     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
347     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
348     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
349     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
350     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
351     put_le16(p + 82, 0x400c);			/* Command Set supported */
352     put_le16(p + 83, 0x7068);			/* Command Set supported */
353     put_le16(p + 84, 0x4000);			/* Features supported */
354     put_le16(p + 85, 0x000c);			/* Command Set enabled */
355     put_le16(p + 86, 0x7044);			/* Command Set enabled */
356     put_le16(p + 87, 0x4000);			/* Features enabled */
357     put_le16(p + 91, 0x4060);			/* Current APM level */
358     put_le16(p + 129, 0x0002);			/* Current features option */
359     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
360     put_le16(p + 131, 0x0001);			/* Initial power mode */
361     put_le16(p + 132, 0x0000);			/* User signature */
362     put_le16(p + 160, 0x8100);			/* Power requirement */
363     put_le16(p + 161, 0x8001);			/* CF command set */
364 
365     ide_cfata_identify_size(s);
366     s->identify_set = 1;
367 
368 fill_buffer:
369     memcpy(s->io_buffer, p, sizeof(s->identify_data));
370 }
371 
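/*
 * Place the device signature in the task file registers, as read by the
 * guest after a reset or EXECUTE DEVICE DIAGNOSTIC.  ATAPI (PACKET) devices
 * identify themselves with 0x14/0xEB in the cylinder (LBA mid/high)
 * registers, ATA disks with 0x00/0x00, and a slot with no backing drive
 * reads back 0xFF/0xFF.
 */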
372 static void ide_set_signature(IDEState *s)
373 {
374     s->select &= ~(ATA_DEV_HS); /* clear head */
375     /* put signature */
376     s->nsector = 1;
377     s->sector = 1;
378     if (s->drive_kind == IDE_CD) {
379         s->lcyl = 0x14;
380         s->hcyl = 0xeb;
381     } else if (s->blk) {
382         s->lcyl = 0;
383         s->hcyl = 0;
384     } else {
385         s->lcyl = 0xff;
386         s->hcyl = 0xff;
387     }
388 }
389 
390 static bool ide_sect_range_ok(IDEState *s,
391                               uint64_t sector, uint64_t nb_sectors)
392 {
393     uint64_t total_sectors;
394 
395     blk_get_geometry(s->blk, &total_sectors);
396     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
397         return false;
398     }
399     return true;
400 }
401 
402 typedef struct TrimAIOCB {
403     BlockAIOCB common;
404     IDEState *s;
405     QEMUBH *bh;
406     int ret;
407     QEMUIOVector *qiov;
408     BlockAIOCB *aiocb;
409     int i, j;
410 } TrimAIOCB;
411 
412 static void trim_aio_cancel(BlockAIOCB *acb)
413 {
414     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
415 
416     /* Exit the loop so ide_issue_trim_cb will not continue */
417     iocb->j = iocb->qiov->niov - 1;
418     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
419 
420     iocb->ret = -ECANCELED;
421 
422     if (iocb->aiocb) {
423         blk_aio_cancel_async(iocb->aiocb);
424         iocb->aiocb = NULL;
425     }
426 }
427 
428 static const AIOCBInfo trim_aiocb_info = {
429     .aiocb_size         = sizeof(TrimAIOCB),
430     .cancel_async       = trim_aio_cancel,
431 };
432 
433 static void ide_trim_bh_cb(void *opaque)
434 {
435     TrimAIOCB *iocb = opaque;
436 
437     iocb->common.cb(iocb->common.opaque, iocb->ret);
438 
439     qemu_bh_delete(iocb->bh);
440     iocb->bh = NULL;
441     qemu_aio_unref(iocb);
442 }
443 
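/*
 * DATA SET MANAGEMENT / TRIM payload walker.  Each iovec element of the
 * guest-supplied buffer is a sequence of 8-byte LBA range entries: bits 47:0
 * give the starting sector and bits 63:48 the number of sectors to discard;
 * an entry with a zero count is skipped.  E.g. an entry value of
 * 0x0008000000001000 (after le64_to_cpu) requests a discard of 8 sectors
 * starting at LBA 0x1000.  One blk_aio_pdiscard() is issued per entry and
 * this callback resumes the walk when it completes.
 */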
444 static void ide_issue_trim_cb(void *opaque, int ret)
445 {
446     TrimAIOCB *iocb = opaque;
447     IDEState *s = iocb->s;
448 
449     if (iocb->i >= 0) {
450         if (ret >= 0) {
451             block_acct_done(blk_get_stats(s->blk), &s->acct);
452         } else {
453             block_acct_failed(blk_get_stats(s->blk), &s->acct);
454         }
455     }
456 
457     if (ret >= 0) {
458         while (iocb->j < iocb->qiov->niov) {
459             int j = iocb->j;
460             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
461                 int i = iocb->i;
462                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
463 
464                 /* 6-byte LBA + 2-byte range per entry */
465                 uint64_t entry = le64_to_cpu(buffer[i]);
466                 uint64_t sector = entry & 0x0000ffffffffffffULL;
467                 uint16_t count = entry >> 48;
468 
469                 if (count == 0) {
470                     continue;
471                 }
472 
473                 if (!ide_sect_range_ok(s, sector, count)) {
474                     block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
475                     iocb->ret = -EINVAL;
476                     goto done;
477                 }
478 
479                 block_acct_start(blk_get_stats(s->blk), &s->acct,
480                                  count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
481 
482                 /* Got an entry! Submit and exit.  */
483                 iocb->aiocb = blk_aio_pdiscard(s->blk,
484                                                sector << BDRV_SECTOR_BITS,
485                                                count << BDRV_SECTOR_BITS,
486                                                ide_issue_trim_cb, opaque);
487                 return;
488             }
489 
490             iocb->j++;
491             iocb->i = -1;
492         }
493     } else {
494         iocb->ret = ret;
495     }
496 
497 done:
498     iocb->aiocb = NULL;
499     if (iocb->bh) {
500         replay_bh_schedule_event(iocb->bh);
501     }
502 }
503 
504 BlockAIOCB *ide_issue_trim(
505         int64_t offset, QEMUIOVector *qiov,
506         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
507 {
508     IDEState *s = opaque;
509     TrimAIOCB *iocb;
510 
511     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
512     iocb->s = s;
513     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
514     iocb->ret = 0;
515     iocb->qiov = qiov;
516     iocb->i = -1;
517     iocb->j = 0;
518     ide_issue_trim_cb(iocb, 0);
519     return &iocb->common;
520 }
521 
522 void ide_abort_command(IDEState *s)
523 {
524     ide_transfer_stop(s);
525     s->status = READY_STAT | ERR_STAT;
526     s->error = ABRT_ERR;
527 }
528 
529 static void ide_set_retry(IDEState *s)
530 {
531     s->bus->retry_unit = s->unit;
532     s->bus->retry_sector_num = ide_get_sector(s);
533     s->bus->retry_nsector = s->nsector;
534 }
535 
536 static void ide_clear_retry(IDEState *s)
537 {
538     s->bus->retry_unit = -1;
539     s->bus->retry_sector_num = 0;
540     s->bus->retry_nsector = 0;
541 }
542 
543 /* Prepare a data transfer and register the handler to call once it completes */
544 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
545                                   EndTransferFunc *end_transfer_func)
546 {
547     s->data_ptr = buf;
548     s->data_end = buf + size;
549     ide_set_retry(s);
550     if (!(s->status & ERR_STAT)) {
551         s->status |= DRQ_STAT;
552     }
553     if (!s->bus->dma->ops->pio_transfer) {
554         s->end_transfer_func = end_transfer_func;
555         return false;
556     }
557     s->bus->dma->ops->pio_transfer(s->bus->dma);
558     return true;
559 }
560 
561 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
562                         EndTransferFunc *end_transfer_func)
563 {
564     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
565         end_transfer_func(s);
566     }
567 }
568 
569 static void ide_cmd_done(IDEState *s)
570 {
571     if (s->bus->dma->ops->cmd_done) {
572         s->bus->dma->ops->cmd_done(s->bus->dma);
573     }
574 }
575 
576 static void ide_transfer_halt(IDEState *s)
577 {
578     s->end_transfer_func = ide_transfer_stop;
579     s->data_ptr = s->io_buffer;
580     s->data_end = s->io_buffer;
581     s->status &= ~DRQ_STAT;
582 }
583 
584 void ide_transfer_stop(IDEState *s)
585 {
586     ide_transfer_halt(s);
587     ide_cmd_done(s);
588 }
589 
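/*
 * Compute the current sector number from the task file registers.  Three
 * addressing modes are possible: LBA48 (the HOB shadow registers supply the
 * top 24 bits), LBA28 (the low nibble of the device register supplies bits
 * 27:24) and legacy CHS.  For CHS the usual formula applies, e.g. with
 * heads=16, sectors=63, cylinder=2, head=3, sector=5:
 *   (2 * 16 + 3) * 63 + (5 - 1) = 2209.
 */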
590 int64_t ide_get_sector(IDEState *s)
591 {
592     int64_t sector_num;
593     if (s->select & (ATA_DEV_LBA)) {
594         if (s->lba48) {
595             sector_num = ((int64_t)s->hob_hcyl << 40) |
596                 ((int64_t) s->hob_lcyl << 32) |
597                 ((int64_t) s->hob_sector << 24) |
598                 ((int64_t) s->hcyl << 16) |
599                 ((int64_t) s->lcyl << 8) | s->sector;
600         } else {
601             /* LBA28 */
602             sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
603                 (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
604         }
605     } else {
606         /* CHS */
607         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
608             (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
609     }
610 
611     return sector_num;
612 }
613 
614 void ide_set_sector(IDEState *s, int64_t sector_num)
615 {
616     unsigned int cyl, r;
617     if (s->select & (ATA_DEV_LBA)) {
618         if (s->lba48) {
619             s->sector = sector_num;
620             s->lcyl = sector_num >> 8;
621             s->hcyl = sector_num >> 16;
622             s->hob_sector = sector_num >> 24;
623             s->hob_lcyl = sector_num >> 32;
624             s->hob_hcyl = sector_num >> 40;
625         } else {
626             /* LBA28 */
627             s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
628                 ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
629             s->hcyl = (sector_num >> 16);
630             s->lcyl = (sector_num >> 8);
631             s->sector = (sector_num);
632         }
633     } else {
634         /* CHS */
635         cyl = sector_num / (s->heads * s->sectors);
636         r = sector_num % (s->heads * s->sectors);
637         s->hcyl = cyl >> 8;
638         s->lcyl = cyl;
639         s->select = (s->select & ~(ATA_DEV_HS)) |
640             ((r / s->sectors) & (ATA_DEV_HS));
641         s->sector = (r % s->sectors) + 1;
642     }
643 }
644 
645 static void ide_rw_error(IDEState *s) {
646     ide_abort_command(s);
647     ide_set_irq(s->bus);
648 }
649 
650 static void ide_buffered_readv_cb(void *opaque, int ret)
651 {
652     IDEBufferedRequest *req = opaque;
653     if (!req->orphaned) {
654         if (!ret) {
655             assert(req->qiov.size == req->original_qiov->size);
656             qemu_iovec_from_buf(req->original_qiov, 0,
657                                 req->qiov.local_iov.iov_base,
658                                 req->original_qiov->size);
659         }
660         req->original_cb(req->original_opaque, ret);
661     }
662     QLIST_REMOVE(req, list);
663     qemu_vfree(qemu_iovec_buf(&req->qiov));
664     g_free(req);
665 }
666 
667 #define MAX_BUFFERED_REQS 16
668 
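/*
 * Read sectors through a bounce buffer rather than directly into the
 * caller's QEMUIOVector.  This lets ide_cancel_dma_sync() complete a
 * cancelled request immediately and mark it "orphaned": when the block layer
 * later finishes the read, ide_buffered_readv_cb() simply drops the data
 * instead of copying it into memory the guest may have reused.  If too many
 * buffered requests are already pending, the new one is failed with -EIO.
 */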
669 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
670                                QEMUIOVector *iov, int nb_sectors,
671                                BlockCompletionFunc *cb, void *opaque)
672 {
673     BlockAIOCB *aioreq;
674     IDEBufferedRequest *req;
675     int c = 0;
676 
677     QLIST_FOREACH(req, &s->buffered_requests, list) {
678         c++;
679     }
680     if (c > MAX_BUFFERED_REQS) {
681         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
682     }
683 
684     req = g_new0(IDEBufferedRequest, 1);
685     req->original_qiov = iov;
686     req->original_cb = cb;
687     req->original_opaque = opaque;
688     qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
689                         iov->size);
690 
691     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
692                             &req->qiov, 0, ide_buffered_readv_cb, req);
693 
694     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
695     return aioreq;
696 }
697 
698 /**
699  * Cancel all pending DMA requests.
700  * Any buffered DMA requests are instantly canceled,
701  * but any pending unbuffered DMA requests must be waited on.
702  */
703 void ide_cancel_dma_sync(IDEState *s)
704 {
705     IDEBufferedRequest *req;
706 
707     /* First invoke the callbacks of all buffered requests
708      * and flag those requests as orphaned.  Ideally there are no
709      * unbuffered requests (scatter-gather DMA requests or write
710      * requests) pending, so that we can avoid draining. */
711     QLIST_FOREACH(req, &s->buffered_requests, list) {
712         if (!req->orphaned) {
713             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
714             req->original_cb(req->original_opaque, -ECANCELED);
715         }
716         req->orphaned = true;
717     }
718 
719     /*
720      * We can't cancel scatter-gather DMA in the middle of the
721      * operation or a partial (not full) DMA transfer would reach
722      * the storage, so we wait for completion instead.  We behave
723      * as if the DMA had already completed by the time the guest
724      * tried to cancel it by writing bmdma_cmd_writeb with
725      * BM_CMD_START not set.
726      *
727      * In the future we'll be able to safely cancel the I/O if the
728      * whole DMA operation is submitted to disk with a single
729      * aio operation using preadv/pwritev.
730      */
731     if (s->bus->dma->aiocb) {
732         trace_ide_cancel_dma_sync_remaining();
733         blk_drain(s->blk);
734         assert(s->bus->dma->aiocb == NULL);
735     }
736 }
737 
738 static void ide_sector_read(IDEState *s);
739 
740 static void ide_sector_read_cb(void *opaque, int ret)
741 {
742     IDEState *s = opaque;
743     int n;
744 
745     s->pio_aiocb = NULL;
746     s->status &= ~BUSY_STAT;
747 
748     if (ret != 0) {
749         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
750                                 IDE_RETRY_READ)) {
751             return;
752         }
753     }
754 
755     block_acct_done(blk_get_stats(s->blk), &s->acct);
756 
757     n = s->nsector;
758     if (n > s->req_nb_sectors) {
759         n = s->req_nb_sectors;
760     }
761 
762     ide_set_sector(s, ide_get_sector(s) + n);
763     s->nsector -= n;
764     /* Allow the guest to read the io_buffer */
765     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
766     ide_set_irq(s->bus);
767 }
768 
769 static void ide_sector_read(IDEState *s)
770 {
771     int64_t sector_num;
772     int n;
773 
774     s->status = READY_STAT | SEEK_STAT;
775     s->error = 0; /* not needed by IDE spec, but needed by Windows */
776     sector_num = ide_get_sector(s);
777     n = s->nsector;
778 
779     if (n == 0) {
780         ide_transfer_stop(s);
781         return;
782     }
783 
784     s->status |= BUSY_STAT;
785 
786     if (n > s->req_nb_sectors) {
787         n = s->req_nb_sectors;
788     }
789 
790     trace_ide_sector_read(sector_num, n);
791 
792     if (!ide_sect_range_ok(s, sector_num, n)) {
793         ide_rw_error(s);
794         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
795         return;
796     }
797 
798     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
799 
800     block_acct_start(blk_get_stats(s->blk), &s->acct,
801                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
802     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
803                                       ide_sector_read_cb, s);
804 }
805 
806 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
807 {
808     if (s->bus->dma->ops->commit_buf) {
809         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
810     }
811     s->io_buffer_offset += tx_bytes;
812     qemu_sglist_destroy(&s->sg);
813 }
814 
815 void ide_set_inactive(IDEState *s, bool more)
816 {
817     s->bus->dma->aiocb = NULL;
818     ide_clear_retry(s);
819     if (s->bus->dma->ops->set_inactive) {
820         s->bus->dma->ops->set_inactive(s->bus->dma, more);
821     }
822     ide_cmd_done(s);
823 }
824 
825 void ide_dma_error(IDEState *s)
826 {
827     dma_buf_commit(s, 0);
828     ide_abort_command(s);
829     ide_set_inactive(s, false);
830     ide_set_irq(s->bus);
831 }
832 
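/*
 * Apply the drive's rerror/werror policy to a failed request.  Returns
 * nonzero if the caller must stop processing the request (the error was
 * either reported to the guest or the VM was stopped with enough state saved
 * in bus->error_status to retry later), and zero if the policy is "ignore"
 * and the operation should carry on as if it had succeeded.
 */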
833 int ide_handle_rw_error(IDEState *s, int error, int op)
834 {
835     bool is_read = (op & IDE_RETRY_READ) != 0;
836     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
837 
838     if (action == BLOCK_ERROR_ACTION_STOP) {
839         assert(s->bus->retry_unit == s->unit);
840         s->bus->error_status = op;
841     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
842         block_acct_failed(blk_get_stats(s->blk), &s->acct);
843         if (IS_IDE_RETRY_DMA(op)) {
844             ide_dma_error(s);
845         } else if (IS_IDE_RETRY_ATAPI(op)) {
846             ide_atapi_io_error(s, -error);
847         } else {
848             ide_rw_error(s);
849         }
850     }
851     blk_error_action(s->blk, action, is_read, error);
852     return action != BLOCK_ERROR_ACTION_IGNORE;
853 }
854 
855 static void ide_dma_cb(void *opaque, int ret)
856 {
857     IDEState *s = opaque;
858     int n;
859     int64_t sector_num;
860     uint64_t offset;
861     bool stay_active = false;
862     int32_t prep_size = 0;
863 
864     if (ret == -EINVAL) {
865         ide_dma_error(s);
866         return;
867     }
868 
869     if (ret < 0) {
870         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
871             s->bus->dma->aiocb = NULL;
872             dma_buf_commit(s, 0);
873             return;
874         }
875     }
876 
877     if (s->io_buffer_size > s->nsector * 512) {
878         /*
879          * The PRDs were longer than needed for this request.
880          * The Active bit must remain set after the request completes.
881          */
882         n = s->nsector;
883         stay_active = true;
884     } else {
885         n = s->io_buffer_size >> 9;
886     }
887 
888     sector_num = ide_get_sector(s);
889     if (n > 0) {
890         assert(n * 512 == s->sg.size);
891         dma_buf_commit(s, s->sg.size);
892         sector_num += n;
893         ide_set_sector(s, sector_num);
894         s->nsector -= n;
895     }
896 
897     /* end of transfer ? */
898     if (s->nsector == 0) {
899         s->status = READY_STAT | SEEK_STAT;
900         ide_set_irq(s->bus);
901         goto eot;
902     }
903 
904     /* launch next transfer */
905     n = s->nsector;
906     s->io_buffer_index = 0;
907     s->io_buffer_size = n * 512;
908     prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
909     /* prepare_buf() must succeed and respect the limit */
910     assert(prep_size >= 0 && prep_size <= n * 512);
911 
912     /*
913      * Now prep_size stores the number of bytes in the sglist, and
914      * s->io_buffer_size stores the number of bytes described by the PRDs.
915      */
916 
917     if (prep_size < n * 512) {
918         /*
919          * The PRDs are too short for this request. Error condition!
920          * Reset the Active bit and don't raise the interrupt.
921          */
922         s->status = READY_STAT | SEEK_STAT;
923         dma_buf_commit(s, 0);
924         goto eot;
925     }
926 
927     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
928 
929     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
930         !ide_sect_range_ok(s, sector_num, n)) {
931         ide_dma_error(s);
932         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
933         return;
934     }
935 
936     offset = sector_num << BDRV_SECTOR_BITS;
937     switch (s->dma_cmd) {
938     case IDE_DMA_READ:
939         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
940                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
941         break;
942     case IDE_DMA_WRITE:
943         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
944                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
945         break;
946     case IDE_DMA_TRIM:
947         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
948                                         &s->sg, offset, BDRV_SECTOR_SIZE,
949                                         ide_issue_trim, s, ide_dma_cb, s,
950                                         DMA_DIRECTION_TO_DEVICE);
951         break;
952     default:
953         abort();
954     }
955     return;
956 
957 eot:
958     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
959         block_acct_done(blk_get_stats(s->blk), &s->acct);
960     }
961     ide_set_inactive(s, stay_active);
962 }
963 
964 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
965 {
966     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
967     s->io_buffer_size = 0;
968     s->dma_cmd = dma_cmd;
969 
970     switch (dma_cmd) {
971     case IDE_DMA_READ:
972         block_acct_start(blk_get_stats(s->blk), &s->acct,
973                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
974         break;
975     case IDE_DMA_WRITE:
976         block_acct_start(blk_get_stats(s->blk), &s->acct,
977                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
978         break;
979     default:
980         break;
981     }
982 
983     ide_start_dma(s, ide_dma_cb);
984 }
985 
986 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
987 {
988     s->io_buffer_index = 0;
989     ide_set_retry(s);
990     if (s->bus->dma->ops->start_dma) {
991         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
992     }
993 }
994 
995 static void ide_sector_write(IDEState *s);
996 
997 static void ide_sector_write_timer_cb(void *opaque)
998 {
999     IDEState *s = opaque;
1000     ide_set_irq(s->bus);
1001 }
1002 
1003 static void ide_sector_write_cb(void *opaque, int ret)
1004 {
1005     IDEState *s = opaque;
1006     int n;
1007 
1008     s->pio_aiocb = NULL;
1009     s->status &= ~BUSY_STAT;
1010 
1011     if (ret != 0) {
1012         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
1013             return;
1014         }
1015     }
1016 
1017     block_acct_done(blk_get_stats(s->blk), &s->acct);
1018 
1019     n = s->nsector;
1020     if (n > s->req_nb_sectors) {
1021         n = s->req_nb_sectors;
1022     }
1023     s->nsector -= n;
1024 
1025     ide_set_sector(s, ide_get_sector(s) + n);
1026     if (s->nsector == 0) {
1027         /* no more sectors to write */
1028         ide_transfer_stop(s);
1029     } else {
1030         int n1 = s->nsector;
1031         if (n1 > s->req_nb_sectors) {
1032             n1 = s->req_nb_sectors;
1033         }
1034         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1035                            ide_sector_write);
1036     }
1037 
1038     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1039         /* It seems there is a bug in the Windows 2000 installer HDD
1040            IDE driver which fills the disk with empty logs when the
1041            IDE write IRQ comes too early.  This hack tries to correct
1042            that at the expense of slower write performance.  Use this
1043            option _only_ to install Windows 2000.  You must disable it
1044            for normal use. */
1045         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1046                   (NANOSECONDS_PER_SECOND / 1000));
1047     } else {
1048         ide_set_irq(s->bus);
1049     }
1050 }
1051 
1052 static void ide_sector_write(IDEState *s)
1053 {
1054     int64_t sector_num;
1055     int n;
1056 
1057     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1058     sector_num = ide_get_sector(s);
1059 
1060     n = s->nsector;
1061     if (n > s->req_nb_sectors) {
1062         n = s->req_nb_sectors;
1063     }
1064 
1065     trace_ide_sector_write(sector_num, n);
1066 
1067     if (!ide_sect_range_ok(s, sector_num, n)) {
1068         ide_rw_error(s);
1069         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1070         return;
1071     }
1072 
1073     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1074 
1075     block_acct_start(blk_get_stats(s->blk), &s->acct,
1076                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1077     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1078                                    &s->qiov, 0, ide_sector_write_cb, s);
1079 }
1080 
1081 static void ide_flush_cb(void *opaque, int ret)
1082 {
1083     IDEState *s = opaque;
1084 
1085     s->pio_aiocb = NULL;
1086 
1087     if (ret < 0) {
1088         /* XXX: What sector number to set here? */
1089         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1090             return;
1091         }
1092     }
1093 
1094     if (s->blk) {
1095         block_acct_done(blk_get_stats(s->blk), &s->acct);
1096     }
1097     s->status = READY_STAT | SEEK_STAT;
1098     ide_cmd_done(s);
1099     ide_set_irq(s->bus);
1100 }
1101 
1102 static void ide_flush_cache(IDEState *s)
1103 {
1104     if (s->blk == NULL) {
1105         ide_flush_cb(s, 0);
1106         return;
1107     }
1108 
1109     s->status |= BUSY_STAT;
1110     ide_set_retry(s);
1111     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1112     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1113 }
1114 
1115 static void ide_cfata_metadata_inquiry(IDEState *s)
1116 {
1117     uint16_t *p;
1118     uint32_t spd;
1119 
1120     p = (uint16_t *) s->io_buffer;
1121     memset(p, 0, 0x200);
1122     spd = ((s->mdata_size - 1) >> 9) + 1;
1123 
1124     put_le16(p + 0, 0x0001);			/* Data format revision */
1125     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1126     put_le16(p + 2, s->media_changed);		/* Media status */
1127     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1128     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1129     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1130     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1131 }
1132 
1133 static void ide_cfata_metadata_read(IDEState *s)
1134 {
1135     uint16_t *p;
1136 
1137     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1138         s->status = ERR_STAT;
1139         s->error = ABRT_ERR;
1140         return;
1141     }
1142 
1143     p = (uint16_t *) s->io_buffer;
1144     memset(p, 0, 0x200);
1145 
1146     put_le16(p + 0, s->media_changed);		/* Media status */
1147     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1148                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1149                                     s->nsector << 9), 0x200 - 2));
1150 }
1151 
1152 static void ide_cfata_metadata_write(IDEState *s)
1153 {
1154     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1155         s->status = ERR_STAT;
1156         s->error = ABRT_ERR;
1157         return;
1158     }
1159 
1160     s->media_changed = 0;
1161 
1162     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1163                     s->io_buffer + 2,
1164                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1165                                     s->nsector << 9), 0x200 - 2));
1166 }
1167 
1168 /* called when the inserted state of the media has changed */
1169 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1170 {
1171     IDEState *s = opaque;
1172     uint64_t nb_sectors;
1173 
1174     s->tray_open = !load;
1175     blk_get_geometry(s->blk, &nb_sectors);
1176     s->nb_sectors = nb_sectors;
1177 
1178     /*
1179      * First indicate to the guest that a CD has been removed.  That's
1180      * done on the next command the guest sends us.
1181      *
1182      * Then we set UNIT_ATTENTION, by which the guest will
1183      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1184      */
1185     s->cdrom_changed = 1;
1186     s->events.new_media = true;
1187     s->events.eject_request = false;
1188     ide_set_irq(s->bus);
1189 }
1190 
1191 static void ide_cd_eject_request_cb(void *opaque, bool force)
1192 {
1193     IDEState *s = opaque;
1194 
1195     s->events.eject_request = true;
1196     if (force) {
1197         s->tray_locked = false;
1198     }
1199     ide_set_irq(s->bus);
1200 }
1201 
1202 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1203 {
1204     s->lba48 = lba48;
1205 
1206     /* Handle the 'magic' 0 nsector count conversion here.  To avoid
1207      * fiddling with the rest of the read logic, we just store the
1208      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1209      */
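    /*
     * E.g. for an LBA28 command nsector == 0 means 256 sectors, while for
     * an LBA48 command nsector == 0x00 with hob_nsector == 0x02 encodes
     * (0x02 << 8) | 0x00 = 512 sectors.
     */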
1210     if (!s->lba48) {
1211         if (!s->nsector)
1212             s->nsector = 256;
1213     } else {
1214         if (!s->nsector && !s->hob_nsector)
1215             s->nsector = 65536;
1216         else {
1217             int lo = s->nsector;
1218             int hi = s->hob_nsector;
1219 
1220             s->nsector = (hi << 8) | lo;
1221         }
1222     }
1223 }
1224 
1225 static void ide_clear_hob(IDEBus *bus)
1226 {
1227     /* any write clears HOB high bit of device control register */
1228     bus->cmd &= ~(IDE_CTRL_HOB);
1229 }
1230 
1231 /* IOport [W]rite [R]egisters */
1232 enum ATA_IOPORT_WR {
1233     ATA_IOPORT_WR_DATA = 0,
1234     ATA_IOPORT_WR_FEATURES = 1,
1235     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1236     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1237     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1238     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1239     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1240     ATA_IOPORT_WR_COMMAND = 7,
1241     ATA_IOPORT_WR_NUM_REGISTERS,
1242 };
1243 
1244 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1245     [ATA_IOPORT_WR_DATA] = "Data",
1246     [ATA_IOPORT_WR_FEATURES] = "Features",
1247     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1248     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1249     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1250     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1251     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1252     [ATA_IOPORT_WR_COMMAND] = "Command"
1253 };
1254 
1255 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1256 {
1257     IDEBus *bus = opaque;
1258     IDEState *s = idebus_active_if(bus);
1259     int reg_num = addr & 7;
1260 
1261     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1262 
1263     /* ignore writes to command block while busy with previous command */
1264     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1265         return;
1266     }
1267 
1268     /* NOTE: Device0 and Device1 both receive incoming register writes.
1269      * (They're on the same bus! They have to!) */
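    /*
     * Each write below also latches the register's previous value into its
     * hob_* shadow copy.  LBA48 commands write every register twice (high
     * order byte first, then low), so by the time the command register is
     * written the hob_* fields hold the high order bytes and the plain
     * fields hold the low ones; see ide_get_sector() and
     * ide_cmd_lba48_transform().
     */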
1270 
1271     switch (reg_num) {
1272     case 0:
1273         break;
1274     case ATA_IOPORT_WR_FEATURES:
1275         ide_clear_hob(bus);
1276         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1277         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1278         bus->ifs[0].feature = val;
1279         bus->ifs[1].feature = val;
1280         break;
1281     case ATA_IOPORT_WR_SECTOR_COUNT:
1282         ide_clear_hob(bus);
1283         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1284         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1285         bus->ifs[0].nsector = val;
1286         bus->ifs[1].nsector = val;
1287         break;
1288     case ATA_IOPORT_WR_SECTOR_NUMBER:
1289         ide_clear_hob(bus);
1290         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1291         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1292         bus->ifs[0].sector = val;
1293         bus->ifs[1].sector = val;
1294         break;
1295     case ATA_IOPORT_WR_CYLINDER_LOW:
1296         ide_clear_hob(bus);
1297         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1298         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1299         bus->ifs[0].lcyl = val;
1300         bus->ifs[1].lcyl = val;
1301         break;
1302     case ATA_IOPORT_WR_CYLINDER_HIGH:
1303         ide_clear_hob(bus);
1304         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1305         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1306         bus->ifs[0].hcyl = val;
1307         bus->ifs[1].hcyl = val;
1308         break;
1309     case ATA_IOPORT_WR_DEVICE_HEAD:
1310         ide_clear_hob(bus);
1311         bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1312         bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1313         /* select drive */
1314         bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1315         break;
1316     default:
1317     case ATA_IOPORT_WR_COMMAND:
1318         ide_clear_hob(bus);
1319         qemu_irq_lower(bus->irq);
1320         ide_exec_cmd(bus, val);
1321         break;
1322     }
1323 }
1324 
1325 static void ide_reset(IDEState *s)
1326 {
1327     trace_ide_reset(s);
1328 
1329     if (s->pio_aiocb) {
1330         blk_aio_cancel(s->pio_aiocb);
1331         s->pio_aiocb = NULL;
1332     }
1333 
1334     if (s->drive_kind == IDE_CFATA)
1335         s->mult_sectors = 0;
1336     else
1337         s->mult_sectors = MAX_MULT_SECTORS;
1338     /* ide regs */
1339     s->feature = 0;
1340     s->error = 0;
1341     s->nsector = 0;
1342     s->sector = 0;
1343     s->lcyl = 0;
1344     s->hcyl = 0;
1345 
1346     /* lba48 */
1347     s->hob_feature = 0;
1348     s->hob_sector = 0;
1349     s->hob_nsector = 0;
1350     s->hob_lcyl = 0;
1351     s->hob_hcyl = 0;
1352 
1353     s->select = (ATA_DEV_ALWAYS_ON);
1354     s->status = READY_STAT | SEEK_STAT;
1355 
1356     s->lba48 = 0;
1357 
1358     /* ATAPI specific */
1359     s->sense_key = 0;
1360     s->asc = 0;
1361     s->cdrom_changed = 0;
1362     s->packet_transfer_size = 0;
1363     s->elementary_transfer_size = 0;
1364     s->io_buffer_index = 0;
1365     s->cd_sector_size = 0;
1366     s->atapi_dma = 0;
1367     s->tray_locked = 0;
1368     s->tray_open = 0;
1369     /* ATA DMA state */
1370     s->io_buffer_size = 0;
1371     s->req_nb_sectors = 0;
1372 
1373     ide_set_signature(s);
1374     /* init the transfer handler so that 0xffff is returned on data
1375        accesses */
1376     s->end_transfer_func = ide_dummy_transfer_stop;
1377     ide_dummy_transfer_stop(s);
1378     s->media_changed = 0;
1379 }
1380 
1381 static bool cmd_nop(IDEState *s, uint8_t cmd)
1382 {
1383     return true;
1384 }
1385 
1386 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1387 {
1388     /* Halt PIO (in the DRQ phase), then DMA */
1389     ide_transfer_halt(s);
1390     ide_cancel_dma_sync(s);
1391 
1392     /* Reset any PIO commands, reset signature, etc */
1393     ide_reset(s);
1394 
1395     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1396      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1397     s->status = 0x00;
1398 
1399     /* Do not overwrite status register */
1400     return false;
1401 }
1402 
1403 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1404 {
1405     switch (s->feature) {
1406     case DSM_TRIM:
1407         if (s->blk) {
1408             ide_sector_start_dma(s, IDE_DMA_TRIM);
1409             return false;
1410         }
1411         break;
1412     }
1413 
1414     ide_abort_command(s);
1415     return true;
1416 }
1417 
1418 static bool cmd_identify(IDEState *s, uint8_t cmd)
1419 {
1420     if (s->blk && s->drive_kind != IDE_CD) {
1421         if (s->drive_kind != IDE_CFATA) {
1422             ide_identify(s);
1423         } else {
1424             ide_cfata_identify(s);
1425         }
1426         s->status = READY_STAT | SEEK_STAT;
1427         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1428         ide_set_irq(s->bus);
1429         return false;
1430     } else {
1431         if (s->drive_kind == IDE_CD) {
1432             ide_set_signature(s);
1433         }
1434         ide_abort_command(s);
1435     }
1436 
1437     return true;
1438 }
1439 
1440 static bool cmd_verify(IDEState *s, uint8_t cmd)
1441 {
1442     bool lba48 = (cmd == WIN_VERIFY_EXT);
1443 
1444     /* do sector number check ? */
1445     ide_cmd_lba48_transform(s, lba48);
1446 
1447     return true;
1448 }
1449 
1450 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1451 {
1452     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1453         /* Disable Read and Write Multiple */
1454         s->mult_sectors = 0;
1455     } else if ((s->nsector & 0xff) != 0 &&
1456         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1457          (s->nsector & (s->nsector - 1)) != 0)) {
1458         ide_abort_command(s);
1459     } else {
1460         s->mult_sectors = s->nsector & 0xff;
1461     }
1462 
1463     return true;
1464 }
1465 
1466 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1467 {
1468     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1469 
1470     if (!s->blk || !s->mult_sectors) {
1471         ide_abort_command(s);
1472         return true;
1473     }
1474 
1475     ide_cmd_lba48_transform(s, lba48);
1476     s->req_nb_sectors = s->mult_sectors;
1477     ide_sector_read(s);
1478     return false;
1479 }
1480 
1481 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1482 {
1483     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1484     int n;
1485 
1486     if (!s->blk || !s->mult_sectors) {
1487         ide_abort_command(s);
1488         return true;
1489     }
1490 
1491     ide_cmd_lba48_transform(s, lba48);
1492 
1493     s->req_nb_sectors = s->mult_sectors;
1494     n = MIN(s->nsector, s->req_nb_sectors);
1495 
1496     s->status = SEEK_STAT | READY_STAT;
1497     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1498 
1499     s->media_changed = 1;
1500 
1501     return false;
1502 }
1503 
1504 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1505 {
1506     bool lba48 = (cmd == WIN_READ_EXT);
1507 
1508     if (s->drive_kind == IDE_CD) {
1509         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1510         ide_abort_command(s);
1511         return true;
1512     }
1513 
1514     if (!s->blk) {
1515         ide_abort_command(s);
1516         return true;
1517     }
1518 
1519     ide_cmd_lba48_transform(s, lba48);
1520     s->req_nb_sectors = 1;
1521     ide_sector_read(s);
1522 
1523     return false;
1524 }
1525 
1526 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1527 {
1528     bool lba48 = (cmd == WIN_WRITE_EXT);
1529 
1530     if (!s->blk) {
1531         ide_abort_command(s);
1532         return true;
1533     }
1534 
1535     ide_cmd_lba48_transform(s, lba48);
1536 
1537     s->req_nb_sectors = 1;
1538     s->status = SEEK_STAT | READY_STAT;
1539     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1540 
1541     s->media_changed = 1;
1542 
1543     return false;
1544 }
1545 
1546 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1547 {
1548     bool lba48 = (cmd == WIN_READDMA_EXT);
1549 
1550     if (!s->blk) {
1551         ide_abort_command(s);
1552         return true;
1553     }
1554 
1555     ide_cmd_lba48_transform(s, lba48);
1556     ide_sector_start_dma(s, IDE_DMA_READ);
1557 
1558     return false;
1559 }
1560 
1561 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1562 {
1563     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1564 
1565     if (!s->blk) {
1566         ide_abort_command(s);
1567         return true;
1568     }
1569 
1570     ide_cmd_lba48_transform(s, lba48);
1571     ide_sector_start_dma(s, IDE_DMA_WRITE);
1572 
1573     s->media_changed = 1;
1574 
1575     return false;
1576 }
1577 
1578 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1579 {
1580     ide_flush_cache(s);
1581     return false;
1582 }
1583 
1584 static bool cmd_seek(IDEState *s, uint8_t cmd)
1585 {
1586     /* XXX: Check that seek is within bounds */
1587     return true;
1588 }
1589 
1590 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1591 {
1592     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1593 
1594     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1595     if (s->nb_sectors == 0) {
1596         ide_abort_command(s);
1597         return true;
1598     }
1599 
1600     ide_cmd_lba48_transform(s, lba48);
1601     ide_set_sector(s, s->nb_sectors - 1);
1602 
1603     return true;
1604 }
1605 
1606 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1607 {
1608     s->nsector = 0xff; /* device active or idle */
1609     return true;
1610 }
1611 
1612 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1613 {
1614     uint16_t *identify_data;
1615 
1616     if (!s->blk) {
1617         ide_abort_command(s);
1618         return true;
1619     }
1620 
1621     /* XXX: valid for CDROM ? */
1622     switch (s->feature) {
1623     case 0x02: /* write cache enable */
1624         blk_set_enable_write_cache(s->blk, true);
1625         identify_data = (uint16_t *)s->identify_data;
1626         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1627         return true;
1628     case 0x82: /* write cache disable */
1629         blk_set_enable_write_cache(s->blk, false);
1630         identify_data = (uint16_t *)s->identify_data;
1631         put_le16(identify_data + 85, (1 << 14) | 1);
1632         ide_flush_cache(s);
1633         return false;
1634     case 0xcc: /* reverting to power-on defaults enable */
1635     case 0x66: /* reverting to power-on defaults disable */
1636     case 0xaa: /* read look-ahead enable */
1637     case 0x55: /* read look-ahead disable */
1638     case 0x05: /* set advanced power management mode */
1639     case 0x85: /* disable advanced power management mode */
1640     case 0x69: /* NOP */
1641     case 0x67: /* NOP */
1642     case 0x96: /* NOP */
1643     case 0x9a: /* NOP */
1644     case 0x42: /* enable Automatic Acoustic Mode */
1645     case 0xc2: /* disable Automatic Acoustic Mode */
1646         return true;
1647     case 0x03: /* set transfer mode */
1648         {
1649             uint8_t val = s->nsector & 0x07;
1650             identify_data = (uint16_t *)s->identify_data;
1651 
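            /*
             * The sector count register encodes the requested transfer
             * mode: bits 7:3 select the mode class and bits 2:0 the mode
             * number, e.g. 0x45 selects UDMA mode 5 and 0x23 selects
             * multiword DMA mode 3.
             */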
1652             switch (s->nsector >> 3) {
1653             case 0x00: /* pio default */
1654             case 0x01: /* pio mode */
1655                 put_le16(identify_data + 62, 0x07);
1656                 put_le16(identify_data + 63, 0x07);
1657                 put_le16(identify_data + 88, 0x3f);
1658                 break;
1659             case 0x02: /* single word dma mode */
1660                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1661                 put_le16(identify_data + 63, 0x07);
1662                 put_le16(identify_data + 88, 0x3f);
1663                 break;
1664             case 0x04: /* mdma mode */
1665                 put_le16(identify_data + 62, 0x07);
1666                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1667                 put_le16(identify_data + 88, 0x3f);
1668                 break;
1669             case 0x08: /* udma mode */
1670                 put_le16(identify_data + 62, 0x07);
1671                 put_le16(identify_data + 63, 0x07);
1672                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1673                 break;
1674             default:
1675                 goto abort_cmd;
1676             }
1677             return true;
1678         }
1679     }
1680 
1681 abort_cmd:
1682     ide_abort_command(s);
1683     return true;
1684 }
1685 
1686 
1687 /*** ATAPI commands ***/
1688 
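     /* IDENTIFY PACKET DEVICE: return the 512-byte ATAPI identify data via PIO. */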
1689 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1690 {
1691     ide_atapi_identify(s);
1692     s->status = READY_STAT | SEEK_STAT;
1693     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1694     ide_set_irq(s->bus);
1695     return false;
1696 }
1697 
1698 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1699 {
1700     ide_set_signature(s);
1701 
1702     if (s->drive_kind == IDE_CD) {
1703         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1704                         * devices to return a clear status register
1705                         * with READY_STAT *not* set. */
1706         s->error = 0x01;
1707     } else {
1708         s->status = READY_STAT | SEEK_STAT;
1709         /* The bits of the error register are not as usual for this command!
1710          * They are part of the regular output (this is why ERR_STAT isn't set)
1711          * Device 0 passed, Device 1 passed or not present. */
1712         s->error = 0x01;
1713         ide_set_irq(s->bus);
1714     }
1715 
1716     return false;
1717 }
1718 
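     /*
      * PACKET: begin an ATAPI command.  The guest sends the command packet
      * next, which ide_atapi_cmd() interprets; bit 0 of the Features register
      * selects DMA for the data phase.
      */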
1719 static bool cmd_packet(IDEState *s, uint8_t cmd)
1720 {
1721     /* overlapping commands not supported */
1722     if (s->feature & 0x02) {
1723         ide_abort_command(s);
1724         return true;
1725     }
1726 
1727     s->status = READY_STAT | SEEK_STAT;
1728     s->atapi_dma = s->feature & 1;
1729     if (s->atapi_dma) {
1730         s->dma_cmd = IDE_DMA_ATAPI;
1731     }
1732     s->nsector = 1;
1733     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1734                        ide_atapi_cmd);
1735     return false;
1736 }
1737 
1738 
1739 /*** CF-ATA commands ***/
1740 
1741 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1742 {
1743     s->error = 0x09;    /* miscellaneous error */
1744     s->status = READY_STAT | SEEK_STAT;
1745     ide_set_irq(s->bus);
1746 
1747     return false;
1748 }
1749 
1750 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1751 {
1752     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1753      * required for Windows 8 to work with AHCI */
1754 
1755     if (cmd == CFA_WEAR_LEVEL) {
1756         s->nsector = 0;
1757     }
1758 
1759     if (cmd == CFA_ERASE_SECTORS) {
1760         s->media_changed = 1;
1761     }
1762 
1763     return true;
1764 }
1765 
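     /* CFA TRANSLATE SECTOR: return 512 bytes of information about the addressed sector. */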
1766 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1767 {
1768     s->status = READY_STAT | SEEK_STAT;
1769 
1770     memset(s->io_buffer, 0, 0x200);
1771     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1772     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1773     s->io_buffer[0x02] = s->select;                 /* Head */
1774     s->io_buffer[0x03] = s->sector;                 /* Sector */
1775     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1776     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1777     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1778     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1779     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1780     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1781     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1782 
1783     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1784     ide_set_irq(s->bus);
1785 
1786     return false;
1787 }
1788 
1789 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1790 {
1791     switch (s->feature) {
1792     case 0x02:  /* Inquiry Metadata Storage */
1793         ide_cfata_metadata_inquiry(s);
1794         break;
1795     case 0x03:  /* Read Metadata Storage */
1796         ide_cfata_metadata_read(s);
1797         break;
1798     case 0x04:  /* Write Metadata Storage */
1799         ide_cfata_metadata_write(s);
1800         break;
1801     default:
1802         ide_abort_command(s);
1803         return true;
1804     }
1805 
1806     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1807     s->status = 0x00; /* NOTE: READY is _not_ set */
1808     ide_set_irq(s->bus);
1809 
1810     return false;
1811 }
1812 
1813 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1814 {
1815     switch (s->feature) {
1816     case 0x01:  /* sense temperature in device */
1817         s->nsector = 0x50;      /* +20 C */
1818         break;
1819     default:
1820         ide_abort_command(s);
1821         return true;
1822     }
1823 
1824     return true;
1825 }
1826 
1827 
1828 /*** SMART commands ***/
1829 
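     /*
      * SMART: the subcommand is taken from the Features register and the
      * Cylinder registers must hold the SMART key (0x4f/0xc2), otherwise
      * the command is aborted.
      */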
1830 static bool cmd_smart(IDEState *s, uint8_t cmd)
1831 {
1832     int n;
1833 
1834     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1835         goto abort_cmd;
1836     }
1837 
1838     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1839         goto abort_cmd;
1840     }
1841 
1842     switch (s->feature) {
1843     case SMART_DISABLE:
1844         s->smart_enabled = 0;
1845         return true;
1846 
1847     case SMART_ENABLE:
1848         s->smart_enabled = 1;
1849         return true;
1850 
1851     case SMART_ATTR_AUTOSAVE:
1852         switch (s->sector) {
1853         case 0x00:
1854             s->smart_autosave = 0;
1855             break;
1856         case 0xf1:
1857             s->smart_autosave = 1;
1858             break;
1859         default:
1860             goto abort_cmd;
1861         }
1862         return true;
1863 
1864     case SMART_STATUS:
1865         if (!s->smart_errors) {
1866             s->hcyl = 0xc2;
1867             s->lcyl = 0x4f;
1868         } else {
1869             s->hcyl = 0x2c;
1870             s->lcyl = 0xf4;
1871         }
1872         return true;
1873 
1874     case SMART_READ_THRESH:
1875         memset(s->io_buffer, 0, 0x200);
1876         s->io_buffer[0] = 0x01; /* smart struct version */
1877 
1878         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1879             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1880             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1881         }
1882 
1883         /* checksum */
1884         for (n = 0; n < 511; n++) {
1885             s->io_buffer[511] += s->io_buffer[n];
1886         }
1887         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1888 
1889         s->status = READY_STAT | SEEK_STAT;
1890         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1891         ide_set_irq(s->bus);
1892         return false;
1893 
1894     case SMART_READ_DATA:
1895         memset(s->io_buffer, 0, 0x200);
1896         s->io_buffer[0] = 0x01; /* smart struct version */
1897 
1898         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1899             int i;
1900             for (i = 0; i < 11; i++) {
1901                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1902             }
1903         }
1904 
1905         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1906         if (s->smart_selftest_count == 0) {
1907             s->io_buffer[363] = 0;
1908         } else {
1909             s->io_buffer[363] =
1910                 s->smart_selftest_data[3 +
1911                            (s->smart_selftest_count - 1) *
1912                            24];
1913         }
1914         s->io_buffer[364] = 0x20;
1915         s->io_buffer[365] = 0x01;
1916         /* offline data collection capability: execute + self-test */
1917         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1918         s->io_buffer[368] = 0x03; /* smart capability (1) */
1919         s->io_buffer[369] = 0x00; /* smart capability (2) */
1920         s->io_buffer[370] = 0x01; /* error logging supported */
1921         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1922         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1923         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1924 
1925         for (n = 0; n < 511; n++) {
1926             s->io_buffer[511] += s->io_buffer[n];
1927         }
1928         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1929 
1930         s->status = READY_STAT | SEEK_STAT;
1931         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1932         ide_set_irq(s->bus);
1933         return false;
1934 
1935     case SMART_READ_LOG:
1936         switch (s->sector) {
1937         case 0x01: /* summary smart error log */
1938             memset(s->io_buffer, 0, 0x200);
1939             s->io_buffer[0] = 0x01;
1940             s->io_buffer[1] = 0x00; /* no error entries */
1941             s->io_buffer[452] = s->smart_errors & 0xff;
1942             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1943 
1944             for (n = 0; n < 511; n++) {
1945                 s->io_buffer[511] += s->io_buffer[n];
1946             }
1947             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1948             break;
1949         case 0x06: /* smart self test log */
1950             memset(s->io_buffer, 0, 0x200);
1951             s->io_buffer[0] = 0x01;
1952             if (s->smart_selftest_count == 0) {
1953                 s->io_buffer[508] = 0;
1954             } else {
1955                 s->io_buffer[508] = s->smart_selftest_count;
1956                 for (n = 2; n < 506; n++)  {
1957                     s->io_buffer[n] = s->smart_selftest_data[n];
1958                 }
1959             }
1960 
1961             for (n = 0; n < 511; n++) {
1962                 s->io_buffer[511] += s->io_buffer[n];
1963             }
1964             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1965             break;
1966         default:
1967             goto abort_cmd;
1968         }
1969         s->status = READY_STAT | SEEK_STAT;
1970         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1971         ide_set_irq(s->bus);
1972         return false;
1973 
1974     case SMART_EXECUTE_OFFLINE:
1975         switch (s->sector) {
1976         case 0: /* off-line routine */
1977         case 1: /* short self test */
1978         case 2: /* extended self test */
1979             s->smart_selftest_count++;
1980             if (s->smart_selftest_count > 21) {
1981                 s->smart_selftest_count = 1;
1982             }
1983             n = 2 + (s->smart_selftest_count - 1) * 24;
1984             s->smart_selftest_data[n] = s->sector;
1985             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1986             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1987             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1988             break;
1989         default:
1990             goto abort_cmd;
1991         }
1992         return true;
1993     }
1994 
1995 abort_cmd:
1996     ide_abort_command(s);
1997     return true;
1998 }
1999 
2000 #define HD_OK (1u << IDE_HD)
2001 #define CD_OK (1u << IDE_CD)
2002 #define CFA_OK (1u << IDE_CFATA)
2003 #define HD_CFA_OK (HD_OK | CFA_OK)
2004 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2005 
2006 /* Set the Disk Seek Completed status bit during completion */
2007 #define SET_DSC (1u << 8)
2008 
2009 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2010 static const struct {
2011     /* Returns true if the completion code should be run */
2012     bool (*handler)(IDEState *s, uint8_t cmd);
2013     int flags;
2014 } ide_cmd_table[0x100] = {
2015     /* NOP not implemented, mandatory for CD */
2016     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
2017     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2018     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2019     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
2020     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2021     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2022     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2023     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2024     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2025     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2026     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2027     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2028     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2029     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2030     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2031     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2032     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2033     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2034     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2035     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2036     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2037     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2038     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2039     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2040     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2041     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2042     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2043     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2044     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2045     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2046     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2047     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2048     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2049     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2050     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2051     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2052     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2053     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2054     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2055     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2056     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2057     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2058     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2059     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2060     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2061     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2062     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2063     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2064     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2065     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2066     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2067     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2068     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2069     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2070     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2071     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2072 };
2073 
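     /* A command is permitted if it is in the table and its flags allow the current drive kind. */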
2074 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2075 {
2076     return cmd < ARRAY_SIZE(ide_cmd_table)
2077         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2078 }
2079 
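     /*
      * Dispatch a value written to the Command register.  When the handler
      * reports immediate completion, BSY is cleared, DSC is set if requested
      * by the table entry, and an interrupt is raised.
      */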
2080 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2081 {
2082     IDEState *s;
2083     bool complete;
2084 
2085     s = idebus_active_if(bus);
2086     trace_ide_exec_cmd(bus, s, val);
2087 
2088     /* ignore commands to a non-existent slave */
2089     if (s != bus->ifs && !s->blk) {
2090         return;
2091     }
2092 
2093     /* Only RESET is allowed while BSY and/or DRQ are set,
2094      * and only to ATAPI devices. */
2095     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2096         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2097             return;
2098         }
2099     }
2100 
2101     if (!ide_cmd_permitted(s, val)) {
2102         ide_abort_command(s);
2103         ide_set_irq(s->bus);
2104         return;
2105     }
2106 
2107     s->status = READY_STAT | BUSY_STAT;
2108     s->error = 0;
2109     s->io_buffer_offset = 0;
2110 
2111     complete = ide_cmd_table[val].handler(s, val);
2112     if (complete) {
2113         s->status &= ~BUSY_STAT;
2114         assert(!!s->error == !!(s->status & ERR_STAT));
2115 
2116         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2117             s->status |= SEEK_STAT;
2118         }
2119 
2120         ide_cmd_done(s);
2121         ide_set_irq(s->bus);
2122     }
2123 }
2124 
2125 /* IOport [R]ead [R]egisters */
2126 enum ATA_IOPORT_RR {
2127     ATA_IOPORT_RR_DATA = 0,
2128     ATA_IOPORT_RR_ERROR = 1,
2129     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2130     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2131     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2132     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2133     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2134     ATA_IOPORT_RR_STATUS = 7,
2135     ATA_IOPORT_RR_NUM_REGISTERS,
2136 };
2137 
2138 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2139     [ATA_IOPORT_RR_DATA] = "Data",
2140     [ATA_IOPORT_RR_ERROR] = "Error",
2141     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2142     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2143     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2144     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2145     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2146     [ATA_IOPORT_RR_STATUS] = "Status"
2147 };
2148 
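     /*
      * Read one of the eight task file registers.  With IDE_CTRL_HOB set,
      * the "previous" (high order byte) values of the LBA48 fields are
      * returned; reading the Status register also clears the pending IRQ.
      */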
2149 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2150 {
2151     IDEBus *bus = opaque;
2152     IDEState *s = idebus_active_if(bus);
2153     uint32_t reg_num;
2154     int ret, hob;
2155 
2156     reg_num = addr & 7;
2157     hob = bus->cmd & (IDE_CTRL_HOB);
2158     switch (reg_num) {
2159     case ATA_IOPORT_RR_DATA:
2160         ret = 0xff;
2161         break;
2162     case ATA_IOPORT_RR_ERROR:
2163         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2164             (s != bus->ifs && !s->blk)) {
2165             ret = 0;
2166         } else if (!hob) {
2167             ret = s->error;
2168         } else {
2169             ret = s->hob_feature;
2170         }
2171         break;
2172     case ATA_IOPORT_RR_SECTOR_COUNT:
2173         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2174             ret = 0;
2175         } else if (!hob) {
2176             ret = s->nsector & 0xff;
2177         } else {
2178             ret = s->hob_nsector;
2179         }
2180         break;
2181     case ATA_IOPORT_RR_SECTOR_NUMBER:
2182         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2183             ret = 0;
2184         } else if (!hob) {
2185             ret = s->sector;
2186         } else {
2187             ret = s->hob_sector;
2188         }
2189         break;
2190     case ATA_IOPORT_RR_CYLINDER_LOW:
2191         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2192             ret = 0;
2193         } else if (!hob) {
2194             ret = s->lcyl;
2195         } else {
2196             ret = s->hob_lcyl;
2197         }
2198         break;
2199     case ATA_IOPORT_RR_CYLINDER_HIGH:
2200         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2201             ret = 0;
2202         } else if (!hob) {
2203             ret = s->hcyl;
2204         } else {
2205             ret = s->hob_hcyl;
2206         }
2207         break;
2208     case ATA_IOPORT_RR_DEVICE_HEAD:
2209         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2210             ret = 0;
2211         } else {
2212             ret = s->select;
2213         }
2214         break;
2215     default:
2216     case ATA_IOPORT_RR_STATUS:
2217         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2218             (s != bus->ifs && !s->blk)) {
2219             ret = 0;
2220         } else {
2221             ret = s->status;
2222         }
2223         qemu_irq_lower(bus->irq);
2224         break;
2225     }
2226 
2227     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2228     return ret;
2229 }
2230 
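     /* Alternate Status: same value as Status, but reading it does not clear the IRQ. */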
2231 uint32_t ide_status_read(void *opaque, uint32_t addr)
2232 {
2233     IDEBus *bus = opaque;
2234     IDEState *s = idebus_active_if(bus);
2235     int ret;
2236 
2237     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2238         (s != bus->ifs && !s->blk)) {
2239         ret = 0;
2240     } else {
2241         ret = s->status;
2242     }
2243 
2244     trace_ide_status_read(addr, ret, bus, s);
2245     return ret;
2246 }
2247 
2248 static void ide_perform_srst(IDEState *s)
2249 {
2250     s->status |= BUSY_STAT;
2251 
2252     /* Halt PIO (via register state); PIO BH remains scheduled. */
2253     ide_transfer_halt(s);
2254 
2255     /* Cancel DMA -- may drain block device and invoke callbacks */
2256     ide_cancel_dma_sync(s);
2257 
2258     /* Cancel PIO callback, reset registers/signature, etc */
2259     ide_reset(s);
2260 
2261     /* perform diagnostic */
2262     cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
2263 }
2264 
2265 static void ide_bus_perform_srst(void *opaque)
2266 {
2267     IDEBus *bus = opaque;
2268     IDEState *s;
2269     int i;
2270 
2271     for (i = 0; i < 2; i++) {
2272         s = &bus->ifs[i];
2273         ide_perform_srst(s);
2274     }
2275 
2276     bus->cmd &= ~IDE_CTRL_RESET;
2277 }
2278 
2279 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2280 {
2281     IDEBus *bus = opaque;
2282     IDEState *s;
2283     int i;
2284 
2285     trace_ide_ctrl_write(addr, val, bus);
2286 
2287     /* Device0 and Device1 each have their own control register,
2288      * but QEMU models it as just one register in the controller. */
2289     if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2290         for (i = 0; i < 2; i++) {
2291             s = &bus->ifs[i];
2292             s->status |= BUSY_STAT;
2293         }
2294         replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2295                                          ide_bus_perform_srst, bus);
2296     }
2297 
2298     bus->cmd = val;
2299 }
2300 
2301 /*
2302  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2303  * transferred from the device to the guest), false if it's a PIO in
2304  */
2305 static bool ide_is_pio_out(IDEState *s)
2306 {
2307     if (s->end_transfer_func == ide_sector_write ||
2308         s->end_transfer_func == ide_atapi_cmd) {
2309         return false;
2310     } else if (s->end_transfer_func == ide_sector_read ||
2311                s->end_transfer_func == ide_transfer_stop ||
2312                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2313                s->end_transfer_func == ide_dummy_transfer_stop) {
2314         return true;
2315     }
2316 
2317     abort();
2318 }
2319 
2320 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2321 {
2322     IDEBus *bus = opaque;
2323     IDEState *s = idebus_active_if(bus);
2324     uint8_t *p;
2325 
2326     trace_ide_data_writew(addr, val, bus, s);
2327 
2328     /* PIO data access allowed only when DRQ bit is set. The result of a write
2329      * during PIO out is indeterminate, just ignore it. */
2330     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2331         return;
2332     }
2333 
2334     p = s->data_ptr;
2335     if (p + 2 > s->data_end) {
2336         return;
2337     }
2338 
2339     *(uint16_t *)p = le16_to_cpu(val);
2340     p += 2;
2341     s->data_ptr = p;
2342     if (p >= s->data_end) {
2343         s->status &= ~DRQ_STAT;
2344         s->end_transfer_func(s);
2345     }
2346 }
2347 
2348 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2349 {
2350     IDEBus *bus = opaque;
2351     IDEState *s = idebus_active_if(bus);
2352     uint8_t *p;
2353     int ret;
2354 
2355     /* PIO data access allowed only when DRQ bit is set. The result of a read
2356      * during PIO in is indeterminate, return 0 and don't move forward. */
2357     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2358         return 0;
2359     }
2360 
2361     p = s->data_ptr;
2362     if (p + 2 > s->data_end) {
2363         return 0;
2364     }
2365 
2366     ret = cpu_to_le16(*(uint16_t *)p);
2367     p += 2;
2368     s->data_ptr = p;
2369     if (p >= s->data_end) {
2370         s->status &= ~DRQ_STAT;
2371         s->end_transfer_func(s);
2372     }
2373 
2374     trace_ide_data_readw(addr, ret, bus, s);
2375     return ret;
2376 }
2377 
2378 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2379 {
2380     IDEBus *bus = opaque;
2381     IDEState *s = idebus_active_if(bus);
2382     uint8_t *p;
2383 
2384     trace_ide_data_writel(addr, val, bus, s);
2385 
2386     /* PIO data access allowed only when DRQ bit is set. The result of a write
2387      * during PIO out is indeterminate, just ignore it. */
2388     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2389         return;
2390     }
2391 
2392     p = s->data_ptr;
2393     if (p + 4 > s->data_end) {
2394         return;
2395     }
2396 
2397     *(uint32_t *)p = le32_to_cpu(val);
2398     p += 4;
2399     s->data_ptr = p;
2400     if (p >= s->data_end) {
2401         s->status &= ~DRQ_STAT;
2402         s->end_transfer_func(s);
2403     }
2404 }
2405 
2406 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2407 {
2408     IDEBus *bus = opaque;
2409     IDEState *s = idebus_active_if(bus);
2410     uint8_t *p;
2411     int ret;
2412 
2413     /* PIO data access allowed only when DRQ bit is set. The result of a read
2414      * during PIO in is indeterminate, return 0 and don't move forward. */
2415     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2416         ret = 0;
2417         goto out;
2418     }
2419 
2420     p = s->data_ptr;
2421     if (p + 4 > s->data_end) {
2422         return 0;
2423     }
2424 
2425     ret = cpu_to_le32(*(uint32_t *)p);
2426     p += 4;
2427     s->data_ptr = p;
2428     if (p >= s->data_end) {
2429         s->status &= ~DRQ_STAT;
2430         s->end_transfer_func(s);
2431     }
2432 
2433 out:
2434     trace_ide_data_readl(addr, ret, bus, s);
2435     return ret;
2436 }
2437 
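     /* Reset the PIO pointers and fill the start of the buffer with 0xff so stray reads return all ones. */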
2438 static void ide_dummy_transfer_stop(IDEState *s)
2439 {
2440     s->data_ptr = s->io_buffer;
2441     s->data_end = s->io_buffer;
2442     s->io_buffer[0] = 0xff;
2443     s->io_buffer[1] = 0xff;
2444     s->io_buffer[2] = 0xff;
2445     s->io_buffer[3] = 0xff;
2446 }
2447 
2448 void ide_bus_reset(IDEBus *bus)
2449 {
2450     bus->unit = 0;
2451     bus->cmd = 0;
2452     ide_reset(&bus->ifs[0]);
2453     ide_reset(&bus->ifs[1]);
2454     ide_clear_hob(bus);
2455 
2456     /* pending async DMA */
2457     if (bus->dma->aiocb) {
2458         trace_ide_bus_reset_aio();
2459         blk_aio_cancel(bus->dma->aiocb);
2460         bus->dma->aiocb = NULL;
2461     }
2462 
2463     /* reset dma provider too */
2464     if (bus->dma->ops->reset) {
2465         bus->dma->ops->reset(bus->dma);
2466     }
2467 }
2468 
2469 static bool ide_cd_is_tray_open(void *opaque)
2470 {
2471     return ((IDEState *)opaque)->tray_open;
2472 }
2473 
2474 static bool ide_cd_is_medium_locked(void *opaque)
2475 {
2476     return ((IDEState *)opaque)->tray_locked;
2477 }
2478 
2479 static void ide_resize_cb(void *opaque)
2480 {
2481     IDEState *s = opaque;
2482     uint64_t nb_sectors;
2483 
2484     if (!s->identify_set) {
2485         return;
2486     }
2487 
2488     blk_get_geometry(s->blk, &nb_sectors);
2489     s->nb_sectors = nb_sectors;
2490 
2491     /* Update the identify data buffer. */
2492     if (s->drive_kind == IDE_CFATA) {
2493         ide_cfata_identify_size(s);
2494     } else {
2495         /* IDE_CD uses a different set of callbacks entirely. */
2496         assert(s->drive_kind != IDE_CD);
2497         ide_identify_size(s);
2498     }
2499 }
2500 
2501 static const BlockDevOps ide_cd_block_ops = {
2502     .change_media_cb = ide_cd_change_cb,
2503     .eject_request_cb = ide_cd_eject_request_cb,
2504     .is_tray_open = ide_cd_is_tray_open,
2505     .is_medium_locked = ide_cd_is_medium_locked,
2506 };
2507 
2508 static const BlockDevOps ide_hd_block_ops = {
2509     .resize_cb = ide_resize_cb,
2510 };
2511 
2512 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2513                    const char *version, const char *serial, const char *model,
2514                    uint64_t wwn,
2515                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2516                    int chs_trans, Error **errp)
2517 {
2518     uint64_t nb_sectors;
2519 
2520     s->blk = blk;
2521     s->drive_kind = kind;
2522 
2523     blk_get_geometry(blk, &nb_sectors);
2524     s->cylinders = cylinders;
2525     s->heads = heads;
2526     s->sectors = secs;
2527     s->chs_trans = chs_trans;
2528     s->nb_sectors = nb_sectors;
2529     s->wwn = wwn;
2530     /* The SMART values should be preserved across power cycles
2531        but they aren't.  */
2532     s->smart_enabled = 1;
2533     s->smart_autosave = 1;
2534     s->smart_errors = 0;
2535     s->smart_selftest_count = 0;
2536     if (kind == IDE_CD) {
2537         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2538         blk_set_guest_block_size(blk, 2048);
2539     } else {
2540         if (!blk_is_inserted(s->blk)) {
2541             error_setg(errp, "Device needs media, but drive is empty");
2542             return -1;
2543         }
2544         if (!blk_is_writable(blk)) {
2545             error_setg(errp, "Can't use a read-only drive");
2546             return -1;
2547         }
2548         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2549     }
2550     if (serial) {
2551         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2552     } else {
2553         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2554                  "QM%05d", s->drive_serial);
2555     }
2556     if (model) {
2557         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2558     } else {
2559         switch (kind) {
2560         case IDE_CD:
2561             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2562             break;
2563         case IDE_CFATA:
2564             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2565             break;
2566         default:
2567             strcpy(s->drive_model_str, "QEMU HARDDISK");
2568             break;
2569         }
2570     }
2571 
2572     if (version) {
2573         pstrcpy(s->version, sizeof(s->version), version);
2574     } else {
2575         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2576     }
2577 
2578     ide_reset(s);
2579     blk_iostatus_enable(blk);
2580     return 0;
2581 }
2582 
2583 static void ide_init1(IDEBus *bus, int unit)
2584 {
2585     static int drive_serial = 1;
2586     IDEState *s = &bus->ifs[unit];
2587 
2588     s->bus = bus;
2589     s->unit = unit;
2590     s->drive_serial = drive_serial++;
2591     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2592     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2593     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2594     memset(s->io_buffer, 0, s->io_buffer_total_len);
2595 
2596     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2597     memset(s->smart_selftest_data, 0, 512);
2598 
2599     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2600                                            ide_sector_write_timer_cb, s);
2601 }
2602 
2603 static int ide_nop_int(const IDEDMA *dma, bool is_write)
2604 {
2605     return 0;
2606 }
2607 
2608 static void ide_nop(const IDEDMA *dma)
2609 {
2610 }
2611 
2612 static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2613 {
2614     return 0;
2615 }
2616 
2617 static const IDEDMAOps ide_dma_nop_ops = {
2618     .prepare_buf    = ide_nop_int32,
2619     .restart_dma    = ide_nop,
2620     .rw_buf         = ide_nop_int,
2621 };
2622 
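     /* Restore the task file from the saved retry state and re-issue the DMA transfer. */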
2623 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2624 {
2625     s->unit = s->bus->retry_unit;
2626     ide_set_sector(s, s->bus->retry_sector_num);
2627     s->nsector = s->bus->retry_nsector;
2628     s->bus->dma->ops->restart_dma(s->bus->dma);
2629     s->io_buffer_size = 0;
2630     s->dma_cmd = dma_cmd;
2631     ide_start_dma(s, ide_dma_cb);
2632 }
2633 
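     /*
      * Bottom half scheduled when the VM starts running again: re-submit the
      * DMA, PIO, flush or ATAPI request recorded in bus->error_status.
      */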
2634 static void ide_restart_bh(void *opaque)
2635 {
2636     IDEBus *bus = opaque;
2637     IDEState *s;
2638     bool is_read;
2639     int error_status;
2640 
2641     qemu_bh_delete(bus->bh);
2642     bus->bh = NULL;
2643 
2644     error_status = bus->error_status;
2645     if (bus->error_status == 0) {
2646         return;
2647     }
2648 
2649     s = idebus_active_if(bus);
2650     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2651 
2652     /* The error status must be cleared before resubmitting the request: The
2653      * request may fail again, and this case can only be distinguished if the
2654      * called function can set a new error status. */
2655     bus->error_status = 0;
2656 
2657     /* The HBA has generically asked to be kicked on retry */
2658     if (error_status & IDE_RETRY_HBA) {
2659         if (s->bus->dma->ops->restart) {
2660             s->bus->dma->ops->restart(s->bus->dma);
2661         }
2662     } else if (IS_IDE_RETRY_DMA(error_status)) {
2663         if (error_status & IDE_RETRY_TRIM) {
2664             ide_restart_dma(s, IDE_DMA_TRIM);
2665         } else {
2666             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2667         }
2668     } else if (IS_IDE_RETRY_PIO(error_status)) {
2669         if (is_read) {
2670             ide_sector_read(s);
2671         } else {
2672             ide_sector_write(s);
2673         }
2674     } else if (error_status & IDE_RETRY_FLUSH) {
2675         ide_flush_cache(s);
2676     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2677         assert(s->end_transfer_func == ide_atapi_cmd);
2678         ide_atapi_dma_restart(s);
2679     } else {
2680         abort();
2681     }
2682 }
2683 
2684 static void ide_restart_cb(void *opaque, bool running, RunState state)
2685 {
2686     IDEBus *bus = opaque;
2687 
2688     if (!running)
2689         return;
2690 
2691     if (!bus->bh) {
2692         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2693         qemu_bh_schedule(bus->bh);
2694     }
2695 }
2696 
2697 void ide_register_restart_cb(IDEBus *bus)
2698 {
2699     if (bus->dma->ops->restart_dma) {
2700         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2701     }
2702 }
2703 
2704 static IDEDMA ide_dma_nop = {
2705     .ops = &ide_dma_nop_ops,
2706     .aiocb = NULL,
2707 };
2708 
2709 void ide_init2(IDEBus *bus, qemu_irq irq)
2710 {
2711     int i;
2712 
2713     for(i = 0; i < 2; i++) {
2714         ide_init1(bus, i);
2715         ide_reset(&bus->ifs[i]);
2716     }
2717     bus->irq = irq;
2718     bus->dma = &ide_dma_nop;
2719 }
2720 
2721 void ide_exit(IDEState *s)
2722 {
2723     timer_free(s->sector_write_timer);
2724     qemu_vfree(s->smart_selftest_data);
2725     qemu_vfree(s->io_buffer);
2726 }
2727 
2728 static bool is_identify_set(void *opaque, int version_id)
2729 {
2730     IDEState *s = opaque;
2731 
2732     return s->identify_set != 0;
2733 }
2734 
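     /* Used to serialize end_transfer_func as an index in the PIO vmstate subsection. */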
2735 static EndTransferFunc* transfer_end_table[] = {
2736         ide_sector_read,
2737         ide_sector_write,
2738         ide_transfer_stop,
2739         ide_atapi_cmd_reply_end,
2740         ide_atapi_cmd,
2741         ide_dummy_transfer_stop,
2742 };
2743 
2744 static int transfer_end_table_idx(EndTransferFunc *fn)
2745 {
2746     int i;
2747 
2748     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2749         if (transfer_end_table[i] == fn)
2750             return i;
2751 
2752     return -1;
2753 }
2754 
2755 static int ide_drive_post_load(void *opaque, int version_id)
2756 {
2757     IDEState *s = opaque;
2758 
2759     if (s->blk && s->identify_set) {
2760         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2761     }
2762     return 0;
2763 }
2764 
2765 static int ide_drive_pio_post_load(void *opaque, int version_id)
2766 {
2767     IDEState *s = opaque;
2768 
2769     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2770         return -EINVAL;
2771     }
2772     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2773     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2774     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2775     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2776 
2777     return 0;
2778 }
2779 
2780 static int ide_drive_pio_pre_save(void *opaque)
2781 {
2782     IDEState *s = opaque;
2783     int idx;
2784 
2785     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2786     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2787 
2788     idx = transfer_end_table_idx(s->end_transfer_func);
2789     if (idx == -1) {
2790         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2791                         __func__);
2792         s->end_transfer_fn_idx = 2;
2793     } else {
2794         s->end_transfer_fn_idx = idx;
2795     }
2796 
2797     return 0;
2798 }
2799 
2800 static bool ide_drive_pio_state_needed(void *opaque)
2801 {
2802     IDEState *s = opaque;
2803 
2804     return ((s->status & DRQ_STAT) != 0)
2805         || (s->bus->error_status & IDE_RETRY_PIO);
2806 }
2807 
2808 static bool ide_tray_state_needed(void *opaque)
2809 {
2810     IDEState *s = opaque;
2811 
2812     return s->tray_open || s->tray_locked;
2813 }
2814 
2815 static bool ide_atapi_gesn_needed(void *opaque)
2816 {
2817     IDEState *s = opaque;
2818 
2819     return s->events.new_media || s->events.eject_request;
2820 }
2821 
2822 static bool ide_error_needed(void *opaque)
2823 {
2824     IDEBus *bus = opaque;
2825 
2826     return (bus->error_status != 0);
2827 }
2828 
2829 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2830 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2831     .name ="ide_drive/atapi/gesn_state",
2832     .version_id = 1,
2833     .minimum_version_id = 1,
2834     .needed = ide_atapi_gesn_needed,
2835     .fields = (VMStateField[]) {
2836         VMSTATE_BOOL(events.new_media, IDEState),
2837         VMSTATE_BOOL(events.eject_request, IDEState),
2838         VMSTATE_END_OF_LIST()
2839     }
2840 };
2841 
2842 static const VMStateDescription vmstate_ide_tray_state = {
2843     .name = "ide_drive/tray_state",
2844     .version_id = 1,
2845     .minimum_version_id = 1,
2846     .needed = ide_tray_state_needed,
2847     .fields = (VMStateField[]) {
2848         VMSTATE_BOOL(tray_open, IDEState),
2849         VMSTATE_BOOL(tray_locked, IDEState),
2850         VMSTATE_END_OF_LIST()
2851     }
2852 };
2853 
2854 static const VMStateDescription vmstate_ide_drive_pio_state = {
2855     .name = "ide_drive/pio_state",
2856     .version_id = 1,
2857     .minimum_version_id = 1,
2858     .pre_save = ide_drive_pio_pre_save,
2859     .post_load = ide_drive_pio_post_load,
2860     .needed = ide_drive_pio_state_needed,
2861     .fields = (VMStateField[]) {
2862         VMSTATE_INT32(req_nb_sectors, IDEState),
2863         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2864                              vmstate_info_uint8, uint8_t),
2865         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2866         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2867         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2868         VMSTATE_INT32(elementary_transfer_size, IDEState),
2869         VMSTATE_INT32(packet_transfer_size, IDEState),
2870         VMSTATE_END_OF_LIST()
2871     }
2872 };
2873 
2874 const VMStateDescription vmstate_ide_drive = {
2875     .name = "ide_drive",
2876     .version_id = 3,
2877     .minimum_version_id = 0,
2878     .post_load = ide_drive_post_load,
2879     .fields = (VMStateField[]) {
2880         VMSTATE_INT32(mult_sectors, IDEState),
2881         VMSTATE_INT32(identify_set, IDEState),
2882         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2883         VMSTATE_UINT8(feature, IDEState),
2884         VMSTATE_UINT8(error, IDEState),
2885         VMSTATE_UINT32(nsector, IDEState),
2886         VMSTATE_UINT8(sector, IDEState),
2887         VMSTATE_UINT8(lcyl, IDEState),
2888         VMSTATE_UINT8(hcyl, IDEState),
2889         VMSTATE_UINT8(hob_feature, IDEState),
2890         VMSTATE_UINT8(hob_sector, IDEState),
2891         VMSTATE_UINT8(hob_nsector, IDEState),
2892         VMSTATE_UINT8(hob_lcyl, IDEState),
2893         VMSTATE_UINT8(hob_hcyl, IDEState),
2894         VMSTATE_UINT8(select, IDEState),
2895         VMSTATE_UINT8(status, IDEState),
2896         VMSTATE_UINT8(lba48, IDEState),
2897         VMSTATE_UINT8(sense_key, IDEState),
2898         VMSTATE_UINT8(asc, IDEState),
2899         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2900         VMSTATE_END_OF_LIST()
2901     },
2902     .subsections = (const VMStateDescription*[]) {
2903         &vmstate_ide_drive_pio_state,
2904         &vmstate_ide_tray_state,
2905         &vmstate_ide_atapi_gesn_state,
2906         NULL
2907     }
2908 };
2909 
2910 static const VMStateDescription vmstate_ide_error_status = {
2911     .name ="ide_bus/error",
2912     .version_id = 2,
2913     .minimum_version_id = 1,
2914     .needed = ide_error_needed,
2915     .fields = (VMStateField[]) {
2916         VMSTATE_INT32(error_status, IDEBus),
2917         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2918         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2919         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2920         VMSTATE_END_OF_LIST()
2921     }
2922 };
2923 
2924 const VMStateDescription vmstate_ide_bus = {
2925     .name = "ide_bus",
2926     .version_id = 1,
2927     .minimum_version_id = 1,
2928     .fields = (VMStateField[]) {
2929         VMSTATE_UINT8(cmd, IDEBus),
2930         VMSTATE_UINT8(unit, IDEBus),
2931         VMSTATE_END_OF_LIST()
2932     },
2933     .subsections = (const VMStateDescription*[]) {
2934         &vmstate_ide_error_status,
2935         NULL
2936     }
2937 };
2938 
2939 void ide_drive_get(DriveInfo **hd, int n)
2940 {
2941     int i;
2942 
2943     for (i = 0; i < n; i++) {
2944         hd[i] = drive_get_by_index(IF_IDE, i);
2945     }
2946 }
2947