xref: /openbmc/qemu/hw/ide/core.c (revision 429d3ae2)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/hw.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qapi/error.h"
37 #include "qemu/cutils.h"
38 
39 #include "hw/ide/internal.h"
40 #include "trace.h"
41 
42 /* These values were based on a Seagate ST3500418AS but have been modified
43    to make more sense in QEMU */
44 static const int smart_attributes[][12] = {
45     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
46     /* raw read error rate */
47     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
48     /* spin up */
49     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
50     /* start stop count */
51     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
52     /* remapped sectors */
53     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
54     /* power on hours */
55     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* power cycle count */
57     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58     /* airflow-temperature-celsius */
59     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
60 };
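/*
 * Illustration: each row above is one 12-byte attribute record matching the
 * field comment at the top of the table:
 *
 *     byte  0      attribute id
 *     byte  1      flags (low byte)
 *     byte  2      flags (high byte, "hflags")
 *     byte  3      current (normalized) value
 *     byte  4      worst value recorded
 *     bytes 5-10   raw value (6 vendor-specific bytes)
 *     byte  11     threshold
 *
 * For example the airflow-temperature row reports id 190 with a current and
 * worst value of 0x45 and a threshold of 0x32.  The threshold is kept in the
 * same row for convenience; the SMART command handlers later in this file
 * presumably split it out when building the READ DATA and READ THRESHOLDS
 * replies.
 */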
61 
62 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
63     [IDE_DMA_READ] = "DMA READ",
64     [IDE_DMA_WRITE] = "DMA WRITE",
65     [IDE_DMA_TRIM] = "DMA TRIM",
66     [IDE_DMA_ATAPI] = "DMA ATAPI"
67 };
68 
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
70 {
71     if ((unsigned)enval < IDE_DMA__COUNT) {
72         return IDE_DMA_CMD_lookup[enval];
73     }
74     return "DMA UNKNOWN CMD";
75 }
76 
77 static void ide_dummy_transfer_stop(IDEState *s);
78 
79 static void padstr(char *str, const char *src, int len)
80 {
81     int i, v;
82     for(i = 0; i < len; i++) {
83         if (*src)
84             v = *src++;
85         else
86             v = ' ';
87         str[i^1] = v;
88     }
89 }
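/*
 * Note on padstr(): ATA IDENTIFY strings are defined as arrays of 16-bit
 * words with the first character of each pair in the high byte.  Since the
 * buffer is filled with little-endian words, every pair of ASCII characters
 * is stored swapped (hence str[i^1]); e.g. "QEMU" ends up in memory as
 * 'E','Q','U','M'.  Short sources are padded with spaces, as the spec
 * requires, rather than with NUL bytes.
 */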
90 
91 static void put_le16(uint16_t *p, unsigned int v)
92 {
93     *p = cpu_to_le16(v);
94 }
95 
96 static void ide_identify_size(IDEState *s)
97 {
98     uint16_t *p = (uint16_t *)s->identify_data;
99     put_le16(p + 60, s->nb_sectors);
100     put_le16(p + 61, s->nb_sectors >> 16);
101     put_le16(p + 100, s->nb_sectors);
102     put_le16(p + 101, s->nb_sectors >> 16);
103     put_le16(p + 102, s->nb_sectors >> 32);
104     put_le16(p + 103, s->nb_sectors >> 48);
105 }
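/*
 * Note: per the ATA IDENTIFY layout, words 60-61 report the capacity visible
 * to 28-bit LBA commands and words 100-103 the full 48-bit capacity used
 * when LBA48 is enabled; refreshing them here keeps the cached identify data
 * in sync with the current nb_sectors.
 */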
106 
107 static void ide_identify(IDEState *s)
108 {
109     uint16_t *p;
110     unsigned int oldsize;
111     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
112 
113     p = (uint16_t *)s->identify_data;
114     if (s->identify_set) {
115         goto fill_buffer;
116     }
117     memset(p, 0, sizeof(s->identify_data));
118 
119     put_le16(p + 0, 0x0040);
120     put_le16(p + 1, s->cylinders);
121     put_le16(p + 3, s->heads);
122     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
123     put_le16(p + 5, 512); /* XXX: retired, remove ? */
124     put_le16(p + 6, s->sectors);
125     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
126     put_le16(p + 20, 3); /* XXX: retired, remove ? */
127     put_le16(p + 21, 512); /* cache size in sectors */
128     put_le16(p + 22, 4); /* ecc bytes */
129     padstr((char *)(p + 23), s->version, 8); /* firmware version */
130     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
131 #if MAX_MULT_SECTORS > 1
132     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
133 #endif
134     put_le16(p + 48, 1); /* dword I/O */
135     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
136     put_le16(p + 51, 0x200); /* PIO transfer cycle */
137     put_le16(p + 52, 0x200); /* DMA transfer cycle */
138     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
139     put_le16(p + 54, s->cylinders);
140     put_le16(p + 55, s->heads);
141     put_le16(p + 56, s->sectors);
142     oldsize = s->cylinders * s->heads * s->sectors;
143     put_le16(p + 57, oldsize);
144     put_le16(p + 58, oldsize >> 16);
145     if (s->mult_sectors)
146         put_le16(p + 59, 0x100 | s->mult_sectors);
147     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
148     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
149     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
150     put_le16(p + 63, 0x07); /* mdma0-2 supported */
151     put_le16(p + 64, 0x03); /* pio3-4 supported */
152     put_le16(p + 65, 120);
153     put_le16(p + 66, 120);
154     put_le16(p + 67, 120);
155     put_le16(p + 68, 120);
156     if (dev && dev->conf.discard_granularity) {
157         put_le16(p + 69, (1 << 14)); /* deterministic TRIM behavior */
158     }
159 
160     if (s->ncq_queues) {
161         put_le16(p + 75, s->ncq_queues - 1);
162         /* NCQ supported */
163         put_le16(p + 76, (1 << 8));
164     }
165 
166     put_le16(p + 80, 0xf0); /* ata4 -> ata7 supported */
167     put_le16(p + 81, 0x16); /* conforms to ata5 */
168     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
169     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
170     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
171     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
172     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
173     if (s->wwn) {
174         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
175     } else {
176         put_le16(p + 84, (1 << 14) | 0);
177     }
178     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
179     if (blk_enable_write_cache(s->blk)) {
180         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
181     } else {
182         put_le16(p + 85, (1 << 14) | 1);
183     }
184     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
185     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
186     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
187     if (s->wwn) {
188         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
189     } else {
190         put_le16(p + 87, (1 << 14) | 0);
191     }
192     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
193     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
194     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
195     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
196     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
197     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
198 
199     if (dev && dev->conf.physical_block_size)
200         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
201     if (s->wwn) {
202         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
203         put_le16(p + 108, s->wwn >> 48);
204         put_le16(p + 109, s->wwn >> 32);
205         put_le16(p + 110, s->wwn >> 16);
206         put_le16(p + 111, s->wwn);
207     }
208     if (dev && dev->conf.discard_granularity) {
209         put_le16(p + 169, 1); /* TRIM support */
210     }
211     if (dev) {
212         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
213     }
214 
215     ide_identify_size(s);
216     s->identify_set = 1;
217 
218 fill_buffer:
219     memcpy(s->io_buffer, p, sizeof(s->identify_data));
220 }
221 
222 static void ide_atapi_identify(IDEState *s)
223 {
224     uint16_t *p;
225 
226     p = (uint16_t *)s->identify_data;
227     if (s->identify_set) {
228         goto fill_buffer;
229     }
230     memset(p, 0, sizeof(s->identify_data));
231 
232     /* Removable CDROM, 50us response, 12 byte packets */
233     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
234     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
235     put_le16(p + 20, 3); /* buffer type */
236     put_le16(p + 21, 512); /* cache size in sectors */
237     put_le16(p + 22, 4); /* ecc bytes */
238     padstr((char *)(p + 23), s->version, 8); /* firmware version */
239     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
240     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
241 #ifdef USE_DMA_CDROM
242     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
243     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
244     put_le16(p + 62, 7);  /* single word dma0-2 supported */
245     put_le16(p + 63, 7);  /* mdma0-2 supported */
246 #else
247     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
248     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
249     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
250 #endif
251     put_le16(p + 64, 3); /* pio3-4 supported */
252     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
253     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
254     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
255     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
256 
257     put_le16(p + 71, 30); /* in ns */
258     put_le16(p + 72, 30); /* in ns */
259 
260     if (s->ncq_queues) {
261         put_le16(p + 75, s->ncq_queues - 1);
262         /* NCQ supported */
263         put_le16(p + 76, (1 << 8));
264     }
265 
266     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
267     if (s->wwn) {
268         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
269         put_le16(p + 87, (1 << 8)); /* WWN enabled */
270     }
271 
272 #ifdef USE_DMA_CDROM
273     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
274 #endif
275 
276     if (s->wwn) {
277         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
278         put_le16(p + 108, s->wwn >> 48);
279         put_le16(p + 109, s->wwn >> 32);
280         put_le16(p + 110, s->wwn >> 16);
281         put_le16(p + 111, s->wwn);
282     }
283 
284     s->identify_set = 1;
285 
286 fill_buffer:
287     memcpy(s->io_buffer, p, sizeof(s->identify_data));
288 }
289 
290 static void ide_cfata_identify_size(IDEState *s)
291 {
292     uint16_t *p = (uint16_t *)s->identify_data;
293     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
294     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
295     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
296     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
297 }
298 
299 static void ide_cfata_identify(IDEState *s)
300 {
301     uint16_t *p;
302     uint32_t cur_sec;
303 
304     p = (uint16_t *)s->identify_data;
305     if (s->identify_set) {
306         goto fill_buffer;
307     }
308     memset(p, 0, sizeof(s->identify_data));
309 
310     cur_sec = s->cylinders * s->heads * s->sectors;
311 
312     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
313     put_le16(p + 1, s->cylinders);		/* Default cylinders */
314     put_le16(p + 3, s->heads);			/* Default heads */
315     put_le16(p + 6, s->sectors);		/* Default sectors per track */
316     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
317     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
318     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
319     put_le16(p + 22, 0x0004);			/* ECC bytes */
320     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
321     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
322 #if MAX_MULT_SECTORS > 1
323     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
324 #else
325     put_le16(p + 47, 0x0000);
326 #endif
327     put_le16(p + 49, 0x0f00);			/* Capabilities */
328     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
329     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
330     put_le16(p + 53, 0x0003);			/* Translation params valid */
331     put_le16(p + 54, s->cylinders);		/* Current cylinders */
332     put_le16(p + 55, s->heads);			/* Current heads */
333     put_le16(p + 56, s->sectors);		/* Current sectors */
334     put_le16(p + 57, cur_sec);			/* Current capacity */
335     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
336     if (s->mult_sectors)			/* Multiple sector setting */
337         put_le16(p + 59, 0x100 | s->mult_sectors);
338     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
339     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
340     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
341     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
342     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
343     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
344     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
345     put_le16(p + 82, 0x400c);			/* Command Set supported */
346     put_le16(p + 83, 0x7068);			/* Command Set supported */
347     put_le16(p + 84, 0x4000);			/* Features supported */
348     put_le16(p + 85, 0x000c);			/* Command Set enabled */
349     put_le16(p + 86, 0x7044);			/* Command Set enabled */
350     put_le16(p + 87, 0x4000);			/* Features enabled */
351     put_le16(p + 91, 0x4060);			/* Current APM level */
352     put_le16(p + 129, 0x0002);			/* Current features option */
353     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
354     put_le16(p + 131, 0x0001);			/* Initial power mode */
355     put_le16(p + 132, 0x0000);			/* User signature */
356     put_le16(p + 160, 0x8100);			/* Power requirement */
357     put_le16(p + 161, 0x8001);			/* CF command set */
358 
359     ide_cfata_identify_size(s);
360     s->identify_set = 1;
361 
362 fill_buffer:
363     memcpy(s->io_buffer, p, sizeof(s->identify_data));
364 }
365 
366 static void ide_set_signature(IDEState *s)
367 {
368     s->select &= 0xf0; /* clear head */
369     /* put signature */
370     s->nsector = 1;
371     s->sector = 1;
372     if (s->drive_kind == IDE_CD) {
373         s->lcyl = 0x14;
374         s->hcyl = 0xeb;
375     } else if (s->blk) {
376         s->lcyl = 0;
377         s->hcyl = 0;
378     } else {
379         s->lcyl = 0xff;
380         s->hcyl = 0xff;
381     }
382 }
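/*
 * Note: the values written above form the device signature a driver reads
 * back after reset: 0x14/0xEB in LCYL/HCYL identifies a PACKET (ATAPI)
 * device, 0x00/0x00 an ATA disk, and 0xFF/0xFF is used here when no backing
 * device is present for the slot.
 */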
383 
384 static bool ide_sect_range_ok(IDEState *s,
385                               uint64_t sector, uint64_t nb_sectors)
386 {
387     uint64_t total_sectors;
388 
389     blk_get_geometry(s->blk, &total_sectors);
390     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
391         return false;
392     }
393     return true;
394 }
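/*
 * Note: the range check is written as "sector > total" plus
 * "nb_sectors > total - sector" rather than "sector + nb_sectors > total"
 * so the comparison cannot wrap around on unsigned 64-bit overflow.
 */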
395 
396 typedef struct TrimAIOCB {
397     BlockAIOCB common;
398     IDEState *s;
399     QEMUBH *bh;
400     int ret;
401     QEMUIOVector *qiov;
402     BlockAIOCB *aiocb;
403     int i, j;
404 } TrimAIOCB;
405 
406 static void trim_aio_cancel(BlockAIOCB *acb)
407 {
408     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
409 
410     /* Exit the loop so ide_issue_trim_cb will not continue  */
411     iocb->j = iocb->qiov->niov - 1;
412     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
413 
414     iocb->ret = -ECANCELED;
415 
416     if (iocb->aiocb) {
417         blk_aio_cancel_async(iocb->aiocb);
418         iocb->aiocb = NULL;
419     }
420 }
421 
422 static const AIOCBInfo trim_aiocb_info = {
423     .aiocb_size         = sizeof(TrimAIOCB),
424     .cancel_async       = trim_aio_cancel,
425 };
426 
427 static void ide_trim_bh_cb(void *opaque)
428 {
429     TrimAIOCB *iocb = opaque;
430 
431     iocb->common.cb(iocb->common.opaque, iocb->ret);
432 
433     qemu_bh_delete(iocb->bh);
434     iocb->bh = NULL;
435     qemu_aio_unref(iocb);
436 }
437 
438 static void ide_issue_trim_cb(void *opaque, int ret)
439 {
440     TrimAIOCB *iocb = opaque;
441     IDEState *s = iocb->s;
442 
443     if (ret >= 0) {
444         while (iocb->j < iocb->qiov->niov) {
445             int j = iocb->j;
446             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
447                 int i = iocb->i;
448                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
449 
450                 /* 6-byte LBA + 2-byte range per entry */
451                 uint64_t entry = le64_to_cpu(buffer[i]);
452                 uint64_t sector = entry & 0x0000ffffffffffffULL;
453                 uint16_t count = entry >> 48;
454 
455                 if (count == 0) {
456                     continue;
457                 }
458 
459                 if (!ide_sect_range_ok(s, sector, count)) {
460                     iocb->ret = -EINVAL;
461                     goto done;
462                 }
463 
464                 /* Got an entry! Submit and exit.  */
465                 iocb->aiocb = blk_aio_pdiscard(s->blk,
466                                                sector << BDRV_SECTOR_BITS,
467                                                count << BDRV_SECTOR_BITS,
468                                                ide_issue_trim_cb, opaque);
469                 return;
470             }
471 
472             iocb->j++;
473             iocb->i = -1;
474         }
475     } else {
476         iocb->ret = ret;
477     }
478 
479 done:
480     iocb->aiocb = NULL;
481     if (iocb->bh) {
482         qemu_bh_schedule(iocb->bh);
483     }
484 }
485 
486 BlockAIOCB *ide_issue_trim(
487         int64_t offset, QEMUIOVector *qiov,
488         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
489 {
490     IDEState *s = opaque;
491     TrimAIOCB *iocb;
492 
493     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
494     iocb->s = s;
495     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
496     iocb->ret = 0;
497     iocb->qiov = qiov;
498     iocb->i = -1;
499     iocb->j = 0;
500     ide_issue_trim_cb(iocb, 0);
501     return &iocb->common;
502 }
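/*
 * Illustration (hypothetical values): each 8-byte entry of the DSM TRIM
 * payload walked by ide_issue_trim_cb() packs a 48-bit starting LBA in the
 * low six bytes and a 16-bit sector count in the top two, e.g.
 *
 *     le64 entry 0x0008000000001000 -> discard 8 sectors starting at LBA 0x1000
 *     le64 entry 0x0000000000000000 -> count 0, entry is skipped
 *
 * One discard is submitted per non-empty entry; the callback resumes the
 * i/j walk over the iovec when that discard completes.
 */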
503 
504 void ide_abort_command(IDEState *s)
505 {
506     ide_transfer_stop(s);
507     s->status = READY_STAT | ERR_STAT;
508     s->error = ABRT_ERR;
509 }
510 
511 static void ide_set_retry(IDEState *s)
512 {
513     s->bus->retry_unit = s->unit;
514     s->bus->retry_sector_num = ide_get_sector(s);
515     s->bus->retry_nsector = s->nsector;
516 }
517 
518 static void ide_clear_retry(IDEState *s)
519 {
520     s->bus->retry_unit = -1;
521     s->bus->retry_sector_num = 0;
522     s->bus->retry_nsector = 0;
523 }
524 
525 /* prepare a data transfer and register what to do once it completes */
526 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
527                                   EndTransferFunc *end_transfer_func)
528 {
529     s->data_ptr = buf;
530     s->data_end = buf + size;
531     ide_set_retry(s);
532     if (!(s->status & ERR_STAT)) {
533         s->status |= DRQ_STAT;
534     }
535     if (!s->bus->dma->ops->pio_transfer) {
536         s->end_transfer_func = end_transfer_func;
537         return false;
538     }
539     s->bus->dma->ops->pio_transfer(s->bus->dma);
540     return true;
541 }
542 
543 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
544                         EndTransferFunc *end_transfer_func)
545 {
546     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
547         end_transfer_func(s);
548     }
549 }
550 
551 static void ide_cmd_done(IDEState *s)
552 {
553     if (s->bus->dma->ops->cmd_done) {
554         s->bus->dma->ops->cmd_done(s->bus->dma);
555     }
556 }
557 
558 static void ide_transfer_halt(IDEState *s)
559 {
560     s->end_transfer_func = ide_transfer_stop;
561     s->data_ptr = s->io_buffer;
562     s->data_end = s->io_buffer;
563     s->status &= ~DRQ_STAT;
564 }
565 
566 void ide_transfer_stop(IDEState *s)
567 {
568     ide_transfer_halt(s);
569     ide_cmd_done(s);
570 }
571 
572 int64_t ide_get_sector(IDEState *s)
573 {
574     int64_t sector_num;
575     if (s->select & 0x40) {
576         /* lba */
577 	if (!s->lba48) {
578 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
579 		(s->lcyl << 8) | s->sector;
580 	} else {
581 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
582 		((int64_t) s->hob_lcyl << 32) |
583 		((int64_t) s->hob_sector << 24) |
584 		((int64_t) s->hcyl << 16) |
585 		((int64_t) s->lcyl << 8) | s->sector;
586 	}
587     } else {
588         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
589             (s->select & 0x0f) * s->sectors + (s->sector - 1);
590     }
591     return sector_num;
592 }
593 
594 void ide_set_sector(IDEState *s, int64_t sector_num)
595 {
596     unsigned int cyl, r;
597     if (s->select & 0x40) {
598 	if (!s->lba48) {
599             s->select = (s->select & 0xf0) | (sector_num >> 24);
600             s->hcyl = (sector_num >> 16);
601             s->lcyl = (sector_num >> 8);
602             s->sector = (sector_num);
603 	} else {
604 	    s->sector = sector_num;
605 	    s->lcyl = sector_num >> 8;
606 	    s->hcyl = sector_num >> 16;
607 	    s->hob_sector = sector_num >> 24;
608 	    s->hob_lcyl = sector_num >> 32;
609 	    s->hob_hcyl = sector_num >> 40;
610 	}
611     } else {
612         cyl = sector_num / (s->heads * s->sectors);
613         r = sector_num % (s->heads * s->sectors);
614         s->hcyl = cyl >> 8;
615         s->lcyl = cyl;
616         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
617         s->sector = (r % s->sectors) + 1;
618     }
619 }
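/*
 * Note: in CHS mode the two helpers above invert each other using
 * lba = (cyl * heads + head) * sectors + (sector - 1).  For example, with
 * heads=16 and sectors=63, CHS 2/1/5 maps to (2*16 + 1)*63 + 4 = 2083.
 */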
620 
621 static void ide_rw_error(IDEState *s) {
622     ide_abort_command(s);
623     ide_set_irq(s->bus);
624 }
625 
626 static void ide_buffered_readv_cb(void *opaque, int ret)
627 {
628     IDEBufferedRequest *req = opaque;
629     if (!req->orphaned) {
630         if (!ret) {
631             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
632                                 req->original_qiov->size);
633         }
634         req->original_cb(req->original_opaque, ret);
635     }
636     QLIST_REMOVE(req, list);
637     qemu_vfree(req->iov.iov_base);
638     g_free(req);
639 }
640 
641 #define MAX_BUFFERED_REQS 16
642 
643 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
644                                QEMUIOVector *iov, int nb_sectors,
645                                BlockCompletionFunc *cb, void *opaque)
646 {
647     BlockAIOCB *aioreq;
648     IDEBufferedRequest *req;
649     int c = 0;
650 
651     QLIST_FOREACH(req, &s->buffered_requests, list) {
652         c++;
653     }
654     if (c > MAX_BUFFERED_REQS) {
655         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
656     }
657 
658     req = g_new0(IDEBufferedRequest, 1);
659     req->original_qiov = iov;
660     req->original_cb = cb;
661     req->original_opaque = opaque;
662     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
663     req->iov.iov_len = iov->size;
664     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
665 
666     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
667                             &req->qiov, 0, ide_buffered_readv_cb, req);
668 
669     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
670     return aioreq;
671 }
672 
673 /**
674  * Cancel all pending DMA requests.
675  * Any buffered DMA requests are instantly canceled,
676  * but any pending unbuffered DMA requests must be waited on.
677  */
678 void ide_cancel_dma_sync(IDEState *s)
679 {
680     IDEBufferedRequest *req;
681 
682     /* First invoke the callbacks of all buffered requests
683      * and flag those requests as orphaned. Ideally there
684      * are no unbuffered requests (scatter-gather DMA or
685      * write requests) pending, so we can avoid draining. */
686     QLIST_FOREACH(req, &s->buffered_requests, list) {
687         if (!req->orphaned) {
688             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
689             req->original_cb(req->original_opaque, -ECANCELED);
690         }
691         req->orphaned = true;
692     }
693 
694     /*
695      * We can't cancel scatter-gather DMA in the middle of the
696      * operation, or a partial (not full) DMA transfer would reach
697      * the storage, so we wait for completion instead (we behave
698      * as if the DMA had already completed by the time the guest
699      * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
700      * not set).
701      *
702      * In the future we'll be able to safely cancel the I/O once
703      * the whole DMA operation is submitted to disk as a single
704      * aio operation using preadv/pwritev.
705      */
706     if (s->bus->dma->aiocb) {
707         trace_ide_cancel_dma_sync_remaining();
708         blk_drain(s->blk);
709         assert(s->bus->dma->aiocb == NULL);
710     }
711 }
712 
713 static void ide_sector_read(IDEState *s);
714 
715 static void ide_sector_read_cb(void *opaque, int ret)
716 {
717     IDEState *s = opaque;
718     int n;
719 
720     s->pio_aiocb = NULL;
721     s->status &= ~BUSY_STAT;
722 
723     if (ret == -ECANCELED) {
724         return;
725     }
726     if (ret != 0) {
727         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
728                                 IDE_RETRY_READ)) {
729             return;
730         }
731     }
732 
733     block_acct_done(blk_get_stats(s->blk), &s->acct);
734 
735     n = s->nsector;
736     if (n > s->req_nb_sectors) {
737         n = s->req_nb_sectors;
738     }
739 
740     ide_set_sector(s, ide_get_sector(s) + n);
741     s->nsector -= n;
742     /* Allow the guest to read the io_buffer */
743     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
744     ide_set_irq(s->bus);
745 }
746 
747 static void ide_sector_read(IDEState *s)
748 {
749     int64_t sector_num;
750     int n;
751 
752     s->status = READY_STAT | SEEK_STAT;
753     s->error = 0; /* not needed by IDE spec, but needed by Windows */
754     sector_num = ide_get_sector(s);
755     n = s->nsector;
756 
757     if (n == 0) {
758         ide_transfer_stop(s);
759         return;
760     }
761 
762     s->status |= BUSY_STAT;
763 
764     if (n > s->req_nb_sectors) {
765         n = s->req_nb_sectors;
766     }
767 
768     trace_ide_sector_read(sector_num, n);
769 
770     if (!ide_sect_range_ok(s, sector_num, n)) {
771         ide_rw_error(s);
772         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
773         return;
774     }
775 
776     s->iov.iov_base = s->io_buffer;
777     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
778     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
779 
780     block_acct_start(blk_get_stats(s->blk), &s->acct,
781                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
782     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
783                                       ide_sector_read_cb, s);
784 }
785 
786 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
787 {
788     if (s->bus->dma->ops->commit_buf) {
789         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
790     }
791     s->io_buffer_offset += tx_bytes;
792     qemu_sglist_destroy(&s->sg);
793 }
794 
795 void ide_set_inactive(IDEState *s, bool more)
796 {
797     s->bus->dma->aiocb = NULL;
798     ide_clear_retry(s);
799     if (s->bus->dma->ops->set_inactive) {
800         s->bus->dma->ops->set_inactive(s->bus->dma, more);
801     }
802     ide_cmd_done(s);
803 }
804 
805 void ide_dma_error(IDEState *s)
806 {
807     dma_buf_commit(s, 0);
808     ide_abort_command(s);
809     ide_set_inactive(s, false);
810     ide_set_irq(s->bus);
811 }
812 
813 int ide_handle_rw_error(IDEState *s, int error, int op)
814 {
815     bool is_read = (op & IDE_RETRY_READ) != 0;
816     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
817 
818     if (action == BLOCK_ERROR_ACTION_STOP) {
819         assert(s->bus->retry_unit == s->unit);
820         s->bus->error_status = op;
821     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
822         block_acct_failed(blk_get_stats(s->blk), &s->acct);
823         if (IS_IDE_RETRY_DMA(op)) {
824             ide_dma_error(s);
825         } else if (IS_IDE_RETRY_ATAPI(op)) {
826             ide_atapi_io_error(s, -error);
827         } else {
828             ide_rw_error(s);
829         }
830     }
831     blk_error_action(s->blk, action, is_read, error);
832     return action != BLOCK_ERROR_ACTION_IGNORE;
833 }
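/*
 * Note: the action returned by blk_get_error_action() reflects the drive's
 * configured error policy (presumably set via the rerror/werror options):
 * STOP records the operation in bus->error_status so it can be retried when
 * the VM resumes, REPORT raises the error to the guest through the paths
 * above, and IGNORE makes this function return 0 so the caller continues as
 * if the request had succeeded.
 */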
834 
835 static void ide_dma_cb(void *opaque, int ret)
836 {
837     IDEState *s = opaque;
838     int n;
839     int64_t sector_num;
840     uint64_t offset;
841     bool stay_active = false;
842 
843     if (ret == -ECANCELED) {
844         return;
845     }
846 
847     if (ret == -EINVAL) {
848         ide_dma_error(s);
849         return;
850     }
851 
852     if (ret < 0) {
853         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
854             s->bus->dma->aiocb = NULL;
855             dma_buf_commit(s, 0);
856             return;
857         }
858     }
859 
860     n = s->io_buffer_size >> 9;
861     if (n > s->nsector) {
862         /* The PRDs were longer than needed for this request. Shorten them so
863          * we don't get a negative remainder. The Active bit must remain set
864          * after the request completes. */
865         n = s->nsector;
866         stay_active = true;
867     }
868 
869     sector_num = ide_get_sector(s);
870     if (n > 0) {
871         assert(n * 512 == s->sg.size);
872         dma_buf_commit(s, s->sg.size);
873         sector_num += n;
874         ide_set_sector(s, sector_num);
875         s->nsector -= n;
876     }
877 
878     /* end of transfer ? */
879     if (s->nsector == 0) {
880         s->status = READY_STAT | SEEK_STAT;
881         ide_set_irq(s->bus);
882         goto eot;
883     }
884 
885     /* launch next transfer */
886     n = s->nsector;
887     s->io_buffer_index = 0;
888     s->io_buffer_size = n * 512;
889     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
890         /* The PRDs were too short. Reset the Active bit, but don't raise an
891          * interrupt. */
892         s->status = READY_STAT | SEEK_STAT;
893         dma_buf_commit(s, 0);
894         goto eot;
895     }
896 
897     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
898 
899     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
900         !ide_sect_range_ok(s, sector_num, n)) {
901         ide_dma_error(s);
902         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
903         return;
904     }
905 
906     offset = sector_num << BDRV_SECTOR_BITS;
907     switch (s->dma_cmd) {
908     case IDE_DMA_READ:
909         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
910                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
911         break;
912     case IDE_DMA_WRITE:
913         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
914                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
915         break;
916     case IDE_DMA_TRIM:
917         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
918                                         &s->sg, offset, BDRV_SECTOR_SIZE,
919                                         ide_issue_trim, s, ide_dma_cb, s,
920                                         DMA_DIRECTION_TO_DEVICE);
921         break;
922     default:
923         abort();
924     }
925     return;
926 
927 eot:
928     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
929         block_acct_done(blk_get_stats(s->blk), &s->acct);
930     }
931     ide_set_inactive(s, stay_active);
932 }
933 
934 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
935 {
936     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
937     s->io_buffer_size = 0;
938     s->dma_cmd = dma_cmd;
939 
940     switch (dma_cmd) {
941     case IDE_DMA_READ:
942         block_acct_start(blk_get_stats(s->blk), &s->acct,
943                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
944         break;
945     case IDE_DMA_WRITE:
946         block_acct_start(blk_get_stats(s->blk), &s->acct,
947                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
948         break;
949     default:
950         break;
951     }
952 
953     ide_start_dma(s, ide_dma_cb);
954 }
955 
956 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
957 {
958     s->io_buffer_index = 0;
959     ide_set_retry(s);
960     if (s->bus->dma->ops->start_dma) {
961         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
962     }
963 }
964 
965 static void ide_sector_write(IDEState *s);
966 
967 static void ide_sector_write_timer_cb(void *opaque)
968 {
969     IDEState *s = opaque;
970     ide_set_irq(s->bus);
971 }
972 
973 static void ide_sector_write_cb(void *opaque, int ret)
974 {
975     IDEState *s = opaque;
976     int n;
977 
978     if (ret == -ECANCELED) {
979         return;
980     }
981 
982     s->pio_aiocb = NULL;
983     s->status &= ~BUSY_STAT;
984 
985     if (ret != 0) {
986         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
987             return;
988         }
989     }
990 
991     block_acct_done(blk_get_stats(s->blk), &s->acct);
992 
993     n = s->nsector;
994     if (n > s->req_nb_sectors) {
995         n = s->req_nb_sectors;
996     }
997     s->nsector -= n;
998 
999     ide_set_sector(s, ide_get_sector(s) + n);
1000     if (s->nsector == 0) {
1001         /* no more sectors to write */
1002         ide_transfer_stop(s);
1003     } else {
1004         int n1 = s->nsector;
1005         if (n1 > s->req_nb_sectors) {
1006             n1 = s->req_nb_sectors;
1007         }
1008         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1009                            ide_sector_write);
1010     }
1011 
1012     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1013         /* It seems there is a bug in the Windows 2000 installer HDD
1014            IDE driver which fills the disk with empty logs when the
1015            IDE write IRQ comes too early. This hack tries to correct
1016            that at the expense of slower write performance. Use this
1017            option _only_ to install Windows 2000. You must disable it
1018            for normal use. */
1019         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1020                   (NANOSECONDS_PER_SECOND / 1000));
1021     } else {
1022         ide_set_irq(s->bus);
1023     }
1024 }
1025 
1026 static void ide_sector_write(IDEState *s)
1027 {
1028     int64_t sector_num;
1029     int n;
1030 
1031     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1032     sector_num = ide_get_sector(s);
1033 
1034     n = s->nsector;
1035     if (n > s->req_nb_sectors) {
1036         n = s->req_nb_sectors;
1037     }
1038 
1039     trace_ide_sector_write(sector_num, n);
1040 
1041     if (!ide_sect_range_ok(s, sector_num, n)) {
1042         ide_rw_error(s);
1043         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1044         return;
1045     }
1046 
1047     s->iov.iov_base = s->io_buffer;
1048     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1049     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1050 
1051     block_acct_start(blk_get_stats(s->blk), &s->acct,
1052                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1053     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1054                                    &s->qiov, 0, ide_sector_write_cb, s);
1055 }
1056 
1057 static void ide_flush_cb(void *opaque, int ret)
1058 {
1059     IDEState *s = opaque;
1060 
1061     s->pio_aiocb = NULL;
1062 
1063     if (ret == -ECANCELED) {
1064         return;
1065     }
1066     if (ret < 0) {
1067         /* XXX: What sector number to set here? */
1068         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1069             return;
1070         }
1071     }
1072 
1073     if (s->blk) {
1074         block_acct_done(blk_get_stats(s->blk), &s->acct);
1075     }
1076     s->status = READY_STAT | SEEK_STAT;
1077     ide_cmd_done(s);
1078     ide_set_irq(s->bus);
1079 }
1080 
1081 static void ide_flush_cache(IDEState *s)
1082 {
1083     if (s->blk == NULL) {
1084         ide_flush_cb(s, 0);
1085         return;
1086     }
1087 
1088     s->status |= BUSY_STAT;
1089     ide_set_retry(s);
1090     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1091     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1092 }
1093 
1094 static void ide_cfata_metadata_inquiry(IDEState *s)
1095 {
1096     uint16_t *p;
1097     uint32_t spd;
1098 
1099     p = (uint16_t *) s->io_buffer;
1100     memset(p, 0, 0x200);
1101     spd = ((s->mdata_size - 1) >> 9) + 1;
1102 
1103     put_le16(p + 0, 0x0001);			/* Data format revision */
1104     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1105     put_le16(p + 2, s->media_changed);		/* Media status */
1106     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1107     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1108     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1109     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1110 }
1111 
1112 static void ide_cfata_metadata_read(IDEState *s)
1113 {
1114     uint16_t *p;
1115 
1116     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1117         s->status = ERR_STAT;
1118         s->error = ABRT_ERR;
1119         return;
1120     }
1121 
1122     p = (uint16_t *) s->io_buffer;
1123     memset(p, 0, 0x200);
1124 
1125     put_le16(p + 0, s->media_changed);		/* Media status */
1126     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1127                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1128                                     s->nsector << 9), 0x200 - 2));
1129 }
1130 
1131 static void ide_cfata_metadata_write(IDEState *s)
1132 {
1133     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1134         s->status = ERR_STAT;
1135         s->error = ABRT_ERR;
1136         return;
1137     }
1138 
1139     s->media_changed = 0;
1140 
1141     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1142                     s->io_buffer + 2,
1143                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1144                                     s->nsector << 9), 0x200 - 2));
1145 }
1146 
1147 /* called when the inserted state of the media has changed */
1148 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1149 {
1150     IDEState *s = opaque;
1151     uint64_t nb_sectors;
1152 
1153     s->tray_open = !load;
1154     blk_get_geometry(s->blk, &nb_sectors);
1155     s->nb_sectors = nb_sectors;
1156 
1157     /*
1158      * First indicate to the guest that a CD has been removed.  That's
1159      * done on the next command the guest sends us.
1160      *
1161      * Then we set UNIT_ATTENTION, by which the guest will
1162      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1163      */
1164     s->cdrom_changed = 1;
1165     s->events.new_media = true;
1166     s->events.eject_request = false;
1167     ide_set_irq(s->bus);
1168 }
1169 
1170 static void ide_cd_eject_request_cb(void *opaque, bool force)
1171 {
1172     IDEState *s = opaque;
1173 
1174     s->events.eject_request = true;
1175     if (force) {
1176         s->tray_locked = false;
1177     }
1178     ide_set_irq(s->bus);
1179 }
1180 
1181 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1182 {
1183     s->lba48 = lba48;
1184 
1185     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1186      * fiddling with the rest of the read logic, we just store the
1187      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1188      */
1189     if (!s->lba48) {
1190 	if (!s->nsector)
1191 	    s->nsector = 256;
1192     } else {
1193 	if (!s->nsector && !s->hob_nsector)
1194 	    s->nsector = 65536;
1195 	else {
1196 	    int lo = s->nsector;
1197 	    int hi = s->hob_nsector;
1198 
1199 	    s->nsector = (hi << 8) | lo;
1200 	}
1201     }
1202 }
1203 
1204 static void ide_clear_hob(IDEBus *bus)
1205 {
1206     /* any write clears HOB high bit of device control register */
1207     bus->ifs[0].select &= ~(1 << 7);
1208     bus->ifs[1].select &= ~(1 << 7);
1209 }
1210 
1211 /* IOport [W]rite [R]egisters */
1212 enum ATA_IOPORT_WR {
1213     ATA_IOPORT_WR_DATA = 0,
1214     ATA_IOPORT_WR_FEATURES = 1,
1215     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1216     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1217     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1218     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1219     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1220     ATA_IOPORT_WR_COMMAND = 7,
1221     ATA_IOPORT_WR_NUM_REGISTERS,
1222 };
1223 
1224 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1225     [ATA_IOPORT_WR_DATA] = "Data",
1226     [ATA_IOPORT_WR_FEATURES] = "Features",
1227     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1228     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1229     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1230     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1231     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1232     [ATA_IOPORT_WR_COMMAND] = "Command"
1233 };
1234 
1235 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1236 {
1237     IDEBus *bus = opaque;
1238     IDEState *s = idebus_active_if(bus);
1239     int reg_num = addr & 7;
1240 
1241     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1242 
1243     /* ignore writes to command block while busy with previous command */
1244     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1245         return;
1246     }
1247 
1248     switch (reg_num) {
1249     case 0:
1250         break;
1251     case ATA_IOPORT_WR_FEATURES:
1252         ide_clear_hob(bus);
1253         /* NOTE: the data is written to both drives */
1254         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1255         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1256         bus->ifs[0].feature = val;
1257         bus->ifs[1].feature = val;
1258         break;
1259     case ATA_IOPORT_WR_SECTOR_COUNT:
1260 	ide_clear_hob(bus);
1261 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1262 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1263         bus->ifs[0].nsector = val;
1264         bus->ifs[1].nsector = val;
1265         break;
1266     case ATA_IOPORT_WR_SECTOR_NUMBER:
1267 	ide_clear_hob(bus);
1268 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1269 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1270         bus->ifs[0].sector = val;
1271         bus->ifs[1].sector = val;
1272         break;
1273     case ATA_IOPORT_WR_CYLINDER_LOW:
1274 	ide_clear_hob(bus);
1275 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1276 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1277         bus->ifs[0].lcyl = val;
1278         bus->ifs[1].lcyl = val;
1279         break;
1280     case ATA_IOPORT_WR_CYLINDER_HIGH:
1281 	ide_clear_hob(bus);
1282 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1283 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1284         bus->ifs[0].hcyl = val;
1285         bus->ifs[1].hcyl = val;
1286         break;
1287     case ATA_IOPORT_WR_DEVICE_HEAD:
1288 	/* FIXME: HOB readback uses bit 7 */
1289         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1290         bus->ifs[1].select = (val | 0x10) | 0xa0;
1291         /* select drive */
1292         bus->unit = (val >> 4) & 1;
1293         break;
1294     default:
1295     case ATA_IOPORT_WR_COMMAND:
1296         /* command */
1297         ide_exec_cmd(bus, val);
1298         break;
1299     }
1300 }
1301 
1302 static void ide_reset(IDEState *s)
1303 {
1304     trace_ide_reset(s);
1305 
1306     if (s->pio_aiocb) {
1307         blk_aio_cancel(s->pio_aiocb);
1308         s->pio_aiocb = NULL;
1309     }
1310 
1311     if (s->drive_kind == IDE_CFATA)
1312         s->mult_sectors = 0;
1313     else
1314         s->mult_sectors = MAX_MULT_SECTORS;
1315     /* ide regs */
1316     s->feature = 0;
1317     s->error = 0;
1318     s->nsector = 0;
1319     s->sector = 0;
1320     s->lcyl = 0;
1321     s->hcyl = 0;
1322 
1323     /* lba48 */
1324     s->hob_feature = 0;
1325     s->hob_sector = 0;
1326     s->hob_nsector = 0;
1327     s->hob_lcyl = 0;
1328     s->hob_hcyl = 0;
1329 
1330     s->select = 0xa0;
1331     s->status = READY_STAT | SEEK_STAT;
1332 
1333     s->lba48 = 0;
1334 
1335     /* ATAPI specific */
1336     s->sense_key = 0;
1337     s->asc = 0;
1338     s->cdrom_changed = 0;
1339     s->packet_transfer_size = 0;
1340     s->elementary_transfer_size = 0;
1341     s->io_buffer_index = 0;
1342     s->cd_sector_size = 0;
1343     s->atapi_dma = 0;
1344     s->tray_locked = 0;
1345     s->tray_open = 0;
1346     /* ATA DMA state */
1347     s->io_buffer_size = 0;
1348     s->req_nb_sectors = 0;
1349 
1350     ide_set_signature(s);
1351     /* init the transfer handler so that 0xffff is returned on data
1352        accesses */
1353     s->end_transfer_func = ide_dummy_transfer_stop;
1354     ide_dummy_transfer_stop(s);
1355     s->media_changed = 0;
1356 }
1357 
1358 static bool cmd_nop(IDEState *s, uint8_t cmd)
1359 {
1360     return true;
1361 }
1362 
1363 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1364 {
1365     /* Halt PIO (in the DRQ phase), then DMA */
1366     ide_transfer_halt(s);
1367     ide_cancel_dma_sync(s);
1368 
1369     /* Reset any PIO commands, reset signature, etc */
1370     ide_reset(s);
1371 
1372     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1373      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1374     s->status = 0x00;
1375 
1376     /* Do not overwrite status register */
1377     return false;
1378 }
1379 
1380 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1381 {
1382     switch (s->feature) {
1383     case DSM_TRIM:
1384         if (s->blk) {
1385             ide_sector_start_dma(s, IDE_DMA_TRIM);
1386             return false;
1387         }
1388         break;
1389     }
1390 
1391     ide_abort_command(s);
1392     return true;
1393 }
1394 
1395 static bool cmd_identify(IDEState *s, uint8_t cmd)
1396 {
1397     if (s->blk && s->drive_kind != IDE_CD) {
1398         if (s->drive_kind != IDE_CFATA) {
1399             ide_identify(s);
1400         } else {
1401             ide_cfata_identify(s);
1402         }
1403         s->status = READY_STAT | SEEK_STAT;
1404         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1405         ide_set_irq(s->bus);
1406         return false;
1407     } else {
1408         if (s->drive_kind == IDE_CD) {
1409             ide_set_signature(s);
1410         }
1411         ide_abort_command(s);
1412     }
1413 
1414     return true;
1415 }
1416 
1417 static bool cmd_verify(IDEState *s, uint8_t cmd)
1418 {
1419     bool lba48 = (cmd == WIN_VERIFY_EXT);
1420 
1421     /* do sector number check ? */
1422     ide_cmd_lba48_transform(s, lba48);
1423 
1424     return true;
1425 }
1426 
1427 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1428 {
1429     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1430         /* Disable Read and Write Multiple */
1431         s->mult_sectors = 0;
1432     } else if ((s->nsector & 0xff) != 0 &&
1433         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1434          (s->nsector & (s->nsector - 1)) != 0)) {
1435         ide_abort_command(s);
1436     } else {
1437         s->mult_sectors = s->nsector & 0xff;
1438     }
1439 
1440     return true;
1441 }
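/*
 * Note: the expression (s->nsector & (s->nsector - 1)) != 0 rejects sector
 * counts that are not a power of two, so assuming MAX_MULT_SECTORS is 16 as
 * defined in the IDE headers, the accepted settings are 1, 2, 4, 8 and 16;
 * anything else aborts the command, and 0 disables Read/Write Multiple.
 */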
1442 
1443 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1444 {
1445     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1446 
1447     if (!s->blk || !s->mult_sectors) {
1448         ide_abort_command(s);
1449         return true;
1450     }
1451 
1452     ide_cmd_lba48_transform(s, lba48);
1453     s->req_nb_sectors = s->mult_sectors;
1454     ide_sector_read(s);
1455     return false;
1456 }
1457 
1458 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1459 {
1460     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1461     int n;
1462 
1463     if (!s->blk || !s->mult_sectors) {
1464         ide_abort_command(s);
1465         return true;
1466     }
1467 
1468     ide_cmd_lba48_transform(s, lba48);
1469 
1470     s->req_nb_sectors = s->mult_sectors;
1471     n = MIN(s->nsector, s->req_nb_sectors);
1472 
1473     s->status = SEEK_STAT | READY_STAT;
1474     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1475 
1476     s->media_changed = 1;
1477 
1478     return false;
1479 }
1480 
1481 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1482 {
1483     bool lba48 = (cmd == WIN_READ_EXT);
1484 
1485     if (s->drive_kind == IDE_CD) {
1486         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1487         ide_abort_command(s);
1488         return true;
1489     }
1490 
1491     if (!s->blk) {
1492         ide_abort_command(s);
1493         return true;
1494     }
1495 
1496     ide_cmd_lba48_transform(s, lba48);
1497     s->req_nb_sectors = 1;
1498     ide_sector_read(s);
1499 
1500     return false;
1501 }
1502 
1503 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1504 {
1505     bool lba48 = (cmd == WIN_WRITE_EXT);
1506 
1507     if (!s->blk) {
1508         ide_abort_command(s);
1509         return true;
1510     }
1511 
1512     ide_cmd_lba48_transform(s, lba48);
1513 
1514     s->req_nb_sectors = 1;
1515     s->status = SEEK_STAT | READY_STAT;
1516     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1517 
1518     s->media_changed = 1;
1519 
1520     return false;
1521 }
1522 
1523 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1524 {
1525     bool lba48 = (cmd == WIN_READDMA_EXT);
1526 
1527     if (!s->blk) {
1528         ide_abort_command(s);
1529         return true;
1530     }
1531 
1532     ide_cmd_lba48_transform(s, lba48);
1533     ide_sector_start_dma(s, IDE_DMA_READ);
1534 
1535     return false;
1536 }
1537 
1538 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1539 {
1540     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1541 
1542     if (!s->blk) {
1543         ide_abort_command(s);
1544         return true;
1545     }
1546 
1547     ide_cmd_lba48_transform(s, lba48);
1548     ide_sector_start_dma(s, IDE_DMA_WRITE);
1549 
1550     s->media_changed = 1;
1551 
1552     return false;
1553 }
1554 
1555 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1556 {
1557     ide_flush_cache(s);
1558     return false;
1559 }
1560 
1561 static bool cmd_seek(IDEState *s, uint8_t cmd)
1562 {
1563     /* XXX: Check that seek is within bounds */
1564     return true;
1565 }
1566 
1567 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1568 {
1569     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1570 
1571     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1572     if (s->nb_sectors == 0) {
1573         ide_abort_command(s);
1574         return true;
1575     }
1576 
1577     ide_cmd_lba48_transform(s, lba48);
1578     ide_set_sector(s, s->nb_sectors - 1);
1579 
1580     return true;
1581 }
1582 
1583 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1584 {
1585     s->nsector = 0xff; /* device active or idle */
1586     return true;
1587 }
1588 
1589 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1590 {
1591     uint16_t *identify_data;
1592 
1593     if (!s->blk) {
1594         ide_abort_command(s);
1595         return true;
1596     }
1597 
1598     /* XXX: valid for CDROM ? */
1599     switch (s->feature) {
1600     case 0x02: /* write cache enable */
1601         blk_set_enable_write_cache(s->blk, true);
1602         identify_data = (uint16_t *)s->identify_data;
1603         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1604         return true;
1605     case 0x82: /* write cache disable */
1606         blk_set_enable_write_cache(s->blk, false);
1607         identify_data = (uint16_t *)s->identify_data;
1608         put_le16(identify_data + 85, (1 << 14) | 1);
1609         ide_flush_cache(s);
1610         return false;
1611     case 0xcc: /* reverting to power-on defaults enable */
1612     case 0x66: /* reverting to power-on defaults disable */
1613     case 0xaa: /* read look-ahead enable */
1614     case 0x55: /* read look-ahead disable */
1615     case 0x05: /* set advanced power management mode */
1616     case 0x85: /* disable advanced power management mode */
1617     case 0x69: /* NOP */
1618     case 0x67: /* NOP */
1619     case 0x96: /* NOP */
1620     case 0x9a: /* NOP */
1621     case 0x42: /* enable Automatic Acoustic Mode */
1622     case 0xc2: /* disable Automatic Acoustic Mode */
1623         return true;
1624     case 0x03: /* set transfer mode */
1625         {
1626             uint8_t val = s->nsector & 0x07;
1627             identify_data = (uint16_t *)s->identify_data;
1628 
1629             switch (s->nsector >> 3) {
1630             case 0x00: /* pio default */
1631             case 0x01: /* pio mode */
1632                 put_le16(identify_data + 62, 0x07);
1633                 put_le16(identify_data + 63, 0x07);
1634                 put_le16(identify_data + 88, 0x3f);
1635                 break;
1636             case 0x02: /* single word dma mode */
1637                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1638                 put_le16(identify_data + 63, 0x07);
1639                 put_le16(identify_data + 88, 0x3f);
1640                 break;
1641             case 0x04: /* mdma mode */
1642                 put_le16(identify_data + 62, 0x07);
1643                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1644                 put_le16(identify_data + 88, 0x3f);
1645                 break;
1646             case 0x08: /* udma mode */
1647                 put_le16(identify_data + 62, 0x07);
1648                 put_le16(identify_data + 63, 0x07);
1649                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1650                 break;
1651             default:
1652                 goto abort_cmd;
1653             }
1654             return true;
1655         }
1656     }
1657 
1658 abort_cmd:
1659     ide_abort_command(s);
1660     return true;
1661 }
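/*
 * Note: for the "set transfer mode" subfeature above, the guest encodes the
 * request in the sector count register: bits 7:3 select the mode class
 * (0/1 PIO, 2 single-word DMA, 4 multiword DMA, 8 UDMA) and bits 2:0 the
 * mode number, which is echoed back into IDENTIFY words 62/63/88 by setting
 * the corresponding "selected" bit (1 << (mode + 8)).
 */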
1662 
1663 
1664 /*** ATAPI commands ***/
1665 
1666 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1667 {
1668     ide_atapi_identify(s);
1669     s->status = READY_STAT | SEEK_STAT;
1670     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1671     ide_set_irq(s->bus);
1672     return false;
1673 }
1674 
1675 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1676 {
1677     ide_set_signature(s);
1678 
1679     if (s->drive_kind == IDE_CD) {
1680         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1681                         * devices to return a clear status register
1682                         * with READY_STAT *not* set. */
1683         s->error = 0x01;
1684     } else {
1685         s->status = READY_STAT | SEEK_STAT;
1686         /* The bits of the error register are not as usual for this command!
1687          * They are part of the regular output (this is why ERR_STAT isn't set)
1688          * Device 0 passed, Device 1 passed or not present. */
1689         s->error = 0x01;
1690         ide_set_irq(s->bus);
1691     }
1692 
1693     return false;
1694 }
1695 
1696 static bool cmd_packet(IDEState *s, uint8_t cmd)
1697 {
1698     /* overlapping commands not supported */
1699     if (s->feature & 0x02) {
1700         ide_abort_command(s);
1701         return true;
1702     }
1703 
1704     s->status = READY_STAT | SEEK_STAT;
1705     s->atapi_dma = s->feature & 1;
1706     if (s->atapi_dma) {
1707         s->dma_cmd = IDE_DMA_ATAPI;
1708     }
1709     s->nsector = 1;
1710     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1711                        ide_atapi_cmd);
1712     return false;
1713 }
1714 
1715 
1716 /*** CF-ATA commands ***/
1717 
1718 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1719 {
1720     s->error = 0x09;    /* miscellaneous error */
1721     s->status = READY_STAT | SEEK_STAT;
1722     ide_set_irq(s->bus);
1723 
1724     return false;
1725 }
1726 
1727 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1728 {
1729     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1730      * required for Windows 8 to work with AHCI */
1731 
1732     if (cmd == CFA_WEAR_LEVEL) {
1733         s->nsector = 0;
1734     }
1735 
1736     if (cmd == CFA_ERASE_SECTORS) {
1737         s->media_changed = 1;
1738     }
1739 
1740     return true;
1741 }
1742 
1743 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1744 {
1745     s->status = READY_STAT | SEEK_STAT;
1746 
1747     memset(s->io_buffer, 0, 0x200);
1748     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1749     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1750     s->io_buffer[0x02] = s->select;                 /* Head */
1751     s->io_buffer[0x03] = s->sector;                 /* Sector */
1752     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1753     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1754     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1755     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1756     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1757     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1758     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1759 
1760     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1761     ide_set_irq(s->bus);
1762 
1763     return false;
1764 }
1765 
1766 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1767 {
1768     switch (s->feature) {
1769     case 0x02:  /* Inquiry Metadata Storage */
1770         ide_cfata_metadata_inquiry(s);
1771         break;
1772     case 0x03:  /* Read Metadata Storage */
1773         ide_cfata_metadata_read(s);
1774         break;
1775     case 0x04:  /* Write Metadata Storage */
1776         ide_cfata_metadata_write(s);
1777         break;
1778     default:
1779         ide_abort_command(s);
1780         return true;
1781     }
1782 
1783     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1784     s->status = 0x00; /* NOTE: READY is _not_ set */
1785     ide_set_irq(s->bus);
1786 
1787     return false;
1788 }
1789 
1790 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1791 {
1792     switch (s->feature) {
1793     case 0x01:  /* sense temperature in device */
1794         s->nsector = 0x50;      /* +20 C */
1795         break;
1796     default:
1797         ide_abort_command(s);
1798         return true;
1799     }
1800 
1801     return true;
1802 }
1803 
1804 
1805 /*** SMART commands ***/
1806 
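     /*
      * WIN_SMART: the Features register selects the SMART subcommand.  All
      * subcommands require the key value 0x4f/0xc2 in lcyl/hcyl, and every
      * subcommand except SMART ENABLE is rejected while SMART is disabled.
      */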
1807 static bool cmd_smart(IDEState *s, uint8_t cmd)
1808 {
1809     int n;
1810 
1811     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1812         goto abort_cmd;
1813     }
1814 
1815     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1816         goto abort_cmd;
1817     }
1818 
1819     switch (s->feature) {
1820     case SMART_DISABLE:
1821         s->smart_enabled = 0;
1822         return true;
1823 
1824     case SMART_ENABLE:
1825         s->smart_enabled = 1;
1826         return true;
1827 
1828     case SMART_ATTR_AUTOSAVE:
1829         switch (s->sector) {
1830         case 0x00:
1831             s->smart_autosave = 0;
1832             break;
1833         case 0xf1:
1834             s->smart_autosave = 1;
1835             break;
1836         default:
1837             goto abort_cmd;
1838         }
1839         return true;
1840 
1841     case SMART_STATUS:
1842         if (!s->smart_errors) {
1843             s->hcyl = 0xc2;
1844             s->lcyl = 0x4f;
1845         } else {
1846             s->hcyl = 0x2c;
1847             s->lcyl = 0xf4;
1848         }
1849         return true;
1850 
1851     case SMART_READ_THRESH:
1852         memset(s->io_buffer, 0, 0x200);
1853         s->io_buffer[0] = 0x01; /* smart struct version */
1854 
1855         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1856             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1857             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1858         }
1859 
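             /*
              * The 512-byte SMART sectors built in this function end in a
              * checksum byte chosen so that the 8-bit sum of all 512 bytes is
              * zero: buf[511] = 0x100 - (sum of buf[0..510] modulo 256).
              */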
1860         /* checksum */
1861         for (n = 0; n < 511; n++) {
1862             s->io_buffer[511] += s->io_buffer[n];
1863         }
1864         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1865 
1866         s->status = READY_STAT | SEEK_STAT;
1867         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1868         ide_set_irq(s->bus);
1869         return false;
1870 
1871     case SMART_READ_DATA:
1872         memset(s->io_buffer, 0, 0x200);
1873         s->io_buffer[0] = 0x01; /* smart struct version */
1874 
1875         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1876             int i;
1877             for (i = 0; i < 11; i++) {
1878                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1879             }
1880         }
1881 
1882         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1883         if (s->smart_selftest_count == 0) {
1884             s->io_buffer[363] = 0;
1885         } else {
1886             s->io_buffer[363] =
1887                 s->smart_selftest_data[3 +
1888                            (s->smart_selftest_count - 1) *
1889                            24];
1890         }
1891         s->io_buffer[364] = 0x20;
1892         s->io_buffer[365] = 0x01;
1893         /* offline data collection capacity: execute + self-test */
1894         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1895         s->io_buffer[368] = 0x03; /* smart capability (1) */
1896         s->io_buffer[369] = 0x00; /* smart capability (2) */
1897         s->io_buffer[370] = 0x01; /* error logging supported */
1898         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1899         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1900         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1901 
1902         for (n = 0; n < 511; n++) {
1903             s->io_buffer[511] += s->io_buffer[n];
1904         }
1905         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1906 
1907         s->status = READY_STAT | SEEK_STAT;
1908         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1909         ide_set_irq(s->bus);
1910         return false;
1911 
1912     case SMART_READ_LOG:
1913         switch (s->sector) {
1914         case 0x01: /* summary smart error log */
1915             memset(s->io_buffer, 0, 0x200);
1916             s->io_buffer[0] = 0x01;
1917             s->io_buffer[1] = 0x00; /* no error entries */
1918             s->io_buffer[452] = s->smart_errors & 0xff;
1919             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1920 
1921             for (n = 0; n < 511; n++) {
1922                 s->io_buffer[511] += s->io_buffer[n];
1923             }
1924             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1925             break;
1926         case 0x06: /* smart self test log */
1927             memset(s->io_buffer, 0, 0x200);
1928             s->io_buffer[0] = 0x01;
1929             if (s->smart_selftest_count == 0) {
1930                 s->io_buffer[508] = 0;
1931             } else {
1932                 s->io_buffer[508] = s->smart_selftest_count;
1933                 for (n = 2; n < 506; n++)  {
1934                     s->io_buffer[n] = s->smart_selftest_data[n];
1935                 }
1936             }
1937 
1938             for (n = 0; n < 511; n++) {
1939                 s->io_buffer[511] += s->io_buffer[n];
1940             }
1941             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1942             break;
1943         default:
1944             goto abort_cmd;
1945         }
1946         s->status = READY_STAT | SEEK_STAT;
1947         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1948         ide_set_irq(s->bus);
1949         return false;
1950 
1951     case SMART_EXECUTE_OFFLINE:
1952         switch (s->sector) {
1953         case 0: /* off-line routine */
1954         case 1: /* short self test */
1955         case 2: /* extended self test */
1956             s->smart_selftest_count++;
1957             if (s->smart_selftest_count > 21) {
1958                 s->smart_selftest_count = 1;
1959             }
1960             n = 2 + (s->smart_selftest_count - 1) * 24;
1961             s->smart_selftest_data[n] = s->sector;
1962             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1963             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1964             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1965             break;
1966         default:
1967             goto abort_cmd;
1968         }
1969         return true;
1970     }
1971 
1972 abort_cmd:
1973     ide_abort_command(s);
1974     return true;
1975 }
1976 
1977 #define HD_OK (1u << IDE_HD)
1978 #define CD_OK (1u << IDE_CD)
1979 #define CFA_OK (1u << IDE_CFATA)
1980 #define HD_CFA_OK (HD_OK | CFA_OK)
1981 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1982 
1983 /* Set the Disk Seek Completed status bit during completion */
1984 #define SET_DSC (1u << 8)
1985 
1986 /* See ACS-2 T13/2015-D Table B.2 Command codes */
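     /*
      * Dispatch table for ATA command opcodes.  A handler returns true when
      * the command completes immediately (ide_exec_cmd() then clears BSY,
      * optionally sets DSC and raises an IRQ) and false when completion is
      * asynchronous.  The low flag bits select which drive kinds accept the
      * opcode; SET_DSC requests the Disk Seek Completed bit on success.
      */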
1987 static const struct {
1988     /* Returns true if the completion code should be run */
1989     bool (*handler)(IDEState *s, uint8_t cmd);
1990     int flags;
1991 } ide_cmd_table[0x100] = {
1992     /* NOP not implemented, mandatory for CD */
1993     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1994     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1995     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1996     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
1997     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1998     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1999     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2000     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2001     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2002     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2003     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2004     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2005     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2006     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2007     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2008     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2009     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2010     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2011     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2012     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2013     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2014     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2015     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2016     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2017     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2018     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2019     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2020     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2021     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2022     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2023     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2024     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2025     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2026     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2027     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2028     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2029     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2030     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2031     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2032     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2033     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2034     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2035     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2036     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2037     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2038     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2039     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2040     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2041     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2042     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2043     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2044     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2045     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2046     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2047     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2048     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2049 };
2050 
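     /* A command is permitted only if its table entry includes the active
      * drive kind in its flags. */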
2051 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2052 {
2053     return cmd < ARRAY_SIZE(ide_cmd_table)
2054         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2055 }
2056 
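     /*
      * Top-level ATA command dispatch.  Commands addressed to a missing slave
      * are ignored, and while BSY or DRQ is set only DEVICE RESET to an ATAPI
      * device is accepted.  Opcodes not permitted for the drive kind are
      * rejected with ide_abort_command().
      */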
2057 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2058 {
2059     IDEState *s;
2060     bool complete;
2061 
2062     s = idebus_active_if(bus);
2063     trace_ide_exec_cmd(bus, s, val);
2064 
2065     /* ignore commands to non-existent slave */
2066     if (s != bus->ifs && !s->blk) {
2067         return;
2068     }
2069 
2070     /* Only RESET is allowed while BSY and/or DRQ are set,
2071      * and only to ATAPI devices. */
2072     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2073         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2074             return;
2075         }
2076     }
2077 
2078     if (!ide_cmd_permitted(s, val)) {
2079         ide_abort_command(s);
2080         ide_set_irq(s->bus);
2081         return;
2082     }
2083 
2084     s->status = READY_STAT | BUSY_STAT;
2085     s->error = 0;
2086     s->io_buffer_offset = 0;
2087 
2088     complete = ide_cmd_table[val].handler(s, val);
2089     if (complete) {
2090         s->status &= ~BUSY_STAT;
2091         assert(!!s->error == !!(s->status & ERR_STAT));
2092 
2093         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2094             s->status |= SEEK_STAT;
2095         }
2096 
2097         ide_cmd_done(s);
2098         ide_set_irq(s->bus);
2099     }
2100 }
2101 
2102 /* IOport [R]ead [R]egisters */
2103 enum ATA_IOPORT_RR {
2104     ATA_IOPORT_RR_DATA = 0,
2105     ATA_IOPORT_RR_ERROR = 1,
2106     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2107     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2108     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2109     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2110     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2111     ATA_IOPORT_RR_STATUS = 7,
2112     ATA_IOPORT_RR_NUM_REGISTERS,
2113 };
2114 
2115 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2116     [ATA_IOPORT_RR_DATA] = "Data",
2117     [ATA_IOPORT_RR_ERROR] = "Error",
2118     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2119     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2120     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2121     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2122     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2123     [ATA_IOPORT_RR_STATUS] = "Status"
2124 };
2125 
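     /*
      * Read one of the eight task file registers.  With no drive attached the
      * registers generally read back as 0, and reading the Status register
      * also de-asserts the bus IRQ line.
      */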
2126 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2127 {
2128     IDEBus *bus = opaque;
2129     IDEState *s = idebus_active_if(bus);
2130     uint32_t reg_num;
2131     int ret, hob;
2132 
2133     reg_num = addr & 7;
2134     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2135     //hob = s->select & (1 << 7);
2136     hob = 0;
2137     switch (reg_num) {
2138     case ATA_IOPORT_RR_DATA:
2139         ret = 0xff;
2140         break;
2141     case ATA_IOPORT_RR_ERROR:
2142         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2143             (s != bus->ifs && !s->blk)) {
2144             ret = 0;
2145         } else if (!hob) {
2146             ret = s->error;
2147         } else {
2148             ret = s->hob_feature;
2149         }
2150         break;
2151     case ATA_IOPORT_RR_SECTOR_COUNT:
2152         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2153             ret = 0;
2154         } else if (!hob) {
2155             ret = s->nsector & 0xff;
2156         } else {
2157             ret = s->hob_nsector;
2158         }
2159         break;
2160     case ATA_IOPORT_RR_SECTOR_NUMBER:
2161         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2162             ret = 0;
2163         } else if (!hob) {
2164             ret = s->sector;
2165         } else {
2166             ret = s->hob_sector;
2167         }
2168         break;
2169     case ATA_IOPORT_RR_CYLINDER_LOW:
2170         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2171             ret = 0;
2172         } else if (!hob) {
2173             ret = s->lcyl;
2174         } else {
2175             ret = s->hob_lcyl;
2176         }
2177         break;
2178     case ATA_IOPORT_RR_CYLINDER_HIGH:
2179         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2180             ret = 0;
2181         } else if (!hob) {
2182             ret = s->hcyl;
2183         } else {
2184             ret = s->hob_hcyl;
2185         }
2186         break;
2187     case ATA_IOPORT_RR_DEVICE_HEAD:
2188         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2189             ret = 0;
2190         } else {
2191             ret = s->select;
2192         }
2193         break;
2194     default:
2195     case ATA_IOPORT_RR_STATUS:
2196         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2197             (s != bus->ifs && !s->blk)) {
2198             ret = 0;
2199         } else {
2200             ret = s->status;
2201         }
2202         qemu_irq_lower(bus->irq);
2203         break;
2204     }
2205 
2206     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2207     return ret;
2208 }
2209 
2210 uint32_t ide_status_read(void *opaque, uint32_t addr)
2211 {
2212     IDEBus *bus = opaque;
2213     IDEState *s = idebus_active_if(bus);
2214     int ret;
2215 
2216     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2217         (s != bus->ifs && !s->blk)) {
2218         ret = 0;
2219     } else {
2220         ret = s->status;
2221     }
2222 
2223     trace_ide_status_read(addr, ret, bus, s);
2224     return ret;
2225 }
2226 
2227 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2228 {
2229     IDEBus *bus = opaque;
2230     IDEState *s;
2231     int i;
2232 
2233     trace_ide_cmd_write(addr, val, bus);
2234 
2235     /* common for both drives */
2236     if (!(bus->cmd & IDE_CMD_RESET) &&
2237         (val & IDE_CMD_RESET)) {
2238         /* reset low to high */
2239         for (i = 0; i < 2; i++) {
2240             s = &bus->ifs[i];
2241             s->status = BUSY_STAT | SEEK_STAT;
2242             s->error = 0x01;
2243         }
2244     } else if ((bus->cmd & IDE_CMD_RESET) &&
2245                !(val & IDE_CMD_RESET)) {
2246         /* high to low */
2247         for (i = 0; i < 2; i++) {
2248             s = &bus->ifs[i];
2249             if (s->drive_kind == IDE_CD)
2250                 s->status = 0x00; /* NOTE: READY is _not_ set */
2251             else
2252                 s->status = READY_STAT | SEEK_STAT;
2253             ide_set_signature(s);
2254         }
2255     }
2256 
2257     bus->cmd = val;
2258 }
2259 
2260 /*
2261  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2262  * transferred from the device to the guest), false if it's a PIO in
2263  */
2264 static bool ide_is_pio_out(IDEState *s)
2265 {
2266     if (s->end_transfer_func == ide_sector_write ||
2267         s->end_transfer_func == ide_atapi_cmd) {
2268         return false;
2269     } else if (s->end_transfer_func == ide_sector_read ||
2270                s->end_transfer_func == ide_transfer_stop ||
2271                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2272                s->end_transfer_func == ide_dummy_transfer_stop) {
2273         return true;
2274     }
2275 
2276     abort();
2277 }
2278 
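     /*
      * PIO data port accessors (16- and 32-bit).  Data is only accepted or
      * returned while DRQ is set and the direction matches the current
      * transfer; once data_ptr reaches data_end the registered
      * end_transfer_func continues or finishes the command.
      */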
2279 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2280 {
2281     IDEBus *bus = opaque;
2282     IDEState *s = idebus_active_if(bus);
2283     uint8_t *p;
2284 
2285     trace_ide_data_writew(addr, val, bus, s);
2286 
2287     /* PIO data access allowed only when DRQ bit is set. The result of a write
2288      * during PIO out is indeterminate, just ignore it. */
2289     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2290         return;
2291     }
2292 
2293     p = s->data_ptr;
2294     if (p + 2 > s->data_end) {
2295         return;
2296     }
2297 
2298     *(uint16_t *)p = le16_to_cpu(val);
2299     p += 2;
2300     s->data_ptr = p;
2301     if (p >= s->data_end) {
2302         s->status &= ~DRQ_STAT;
2303         s->end_transfer_func(s);
2304     }
2305 }
2306 
2307 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2308 {
2309     IDEBus *bus = opaque;
2310     IDEState *s = idebus_active_if(bus);
2311     uint8_t *p;
2312     int ret;
2313 
2314     /* PIO data access allowed only when DRQ bit is set. The result of a read
2315      * during PIO in is indeterminate, return 0 and don't move forward. */
2316     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2317         return 0;
2318     }
2319 
2320     p = s->data_ptr;
2321     if (p + 2 > s->data_end) {
2322         return 0;
2323     }
2324 
2325     ret = cpu_to_le16(*(uint16_t *)p);
2326     p += 2;
2327     s->data_ptr = p;
2328     if (p >= s->data_end) {
2329         s->status &= ~DRQ_STAT;
2330         s->end_transfer_func(s);
2331     }
2332 
2333     trace_ide_data_readw(addr, ret, bus, s);
2334     return ret;
2335 }
2336 
2337 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2338 {
2339     IDEBus *bus = opaque;
2340     IDEState *s = idebus_active_if(bus);
2341     uint8_t *p;
2342 
2343     trace_ide_data_writel(addr, val, bus, s);
2344 
2345     /* PIO data access allowed only when DRQ bit is set. The result of a write
2346      * during PIO out is indeterminate, just ignore it. */
2347     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2348         return;
2349     }
2350 
2351     p = s->data_ptr;
2352     if (p + 4 > s->data_end) {
2353         return;
2354     }
2355 
2356     *(uint32_t *)p = le32_to_cpu(val);
2357     p += 4;
2358     s->data_ptr = p;
2359     if (p >= s->data_end) {
2360         s->status &= ~DRQ_STAT;
2361         s->end_transfer_func(s);
2362     }
2363 }
2364 
2365 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2366 {
2367     IDEBus *bus = opaque;
2368     IDEState *s = idebus_active_if(bus);
2369     uint8_t *p;
2370     int ret;
2371 
2372     /* PIO data access allowed only when DRQ bit is set. The result of a read
2373      * during PIO in is indeterminate, return 0 and don't move forward. */
2374     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2375         ret = 0;
2376         goto out;
2377     }
2378 
2379     p = s->data_ptr;
2380     if (p + 4 > s->data_end) {
2381         return 0;
2382     }
2383 
2384     ret = cpu_to_le32(*(uint32_t *)p);
2385     p += 4;
2386     s->data_ptr = p;
2387     if (p >= s->data_end) {
2388         s->status &= ~DRQ_STAT;
2389         s->end_transfer_func(s);
2390     }
2391 
2392 out:
2393     trace_ide_data_readl(addr, ret, bus, s);
2394     return ret;
2395 }
2396 
2397 static void ide_dummy_transfer_stop(IDEState *s)
2398 {
2399     s->data_ptr = s->io_buffer;
2400     s->data_end = s->io_buffer;
2401     s->io_buffer[0] = 0xff;
2402     s->io_buffer[1] = 0xff;
2403     s->io_buffer[2] = 0xff;
2404     s->io_buffer[3] = 0xff;
2405 }
2406 
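     /*
      * Hard reset of both drives on the bus: cancel any in-flight DMA request
      * and give the DMA provider a chance to reset its own state.
      */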
2407 void ide_bus_reset(IDEBus *bus)
2408 {
2409     bus->unit = 0;
2410     bus->cmd = 0;
2411     ide_reset(&bus->ifs[0]);
2412     ide_reset(&bus->ifs[1]);
2413     ide_clear_hob(bus);
2414 
2415     /* pending async DMA */
2416     if (bus->dma->aiocb) {
2417         trace_ide_bus_reset_aio();
2418         blk_aio_cancel(bus->dma->aiocb);
2419         bus->dma->aiocb = NULL;
2420     }
2421 
2422     /* reset dma provider too */
2423     if (bus->dma->ops->reset) {
2424         bus->dma->ops->reset(bus->dma);
2425     }
2426 }
2427 
2428 static bool ide_cd_is_tray_open(void *opaque)
2429 {
2430     return ((IDEState *)opaque)->tray_open;
2431 }
2432 
2433 static bool ide_cd_is_medium_locked(void *opaque)
2434 {
2435     return ((IDEState *)opaque)->tray_locked;
2436 }
2437 
2438 static void ide_resize_cb(void *opaque)
2439 {
2440     IDEState *s = opaque;
2441     uint64_t nb_sectors;
2442 
2443     if (!s->identify_set) {
2444         return;
2445     }
2446 
2447     blk_get_geometry(s->blk, &nb_sectors);
2448     s->nb_sectors = nb_sectors;
2449 
2450     /* Update the identify data buffer. */
2451     if (s->drive_kind == IDE_CFATA) {
2452         ide_cfata_identify_size(s);
2453     } else {
2454         /* IDE_CD uses a different set of callbacks entirely. */
2455         assert(s->drive_kind != IDE_CD);
2456         ide_identify_size(s);
2457     }
2458 }
2459 
2460 static const BlockDevOps ide_cd_block_ops = {
2461     .change_media_cb = ide_cd_change_cb,
2462     .eject_request_cb = ide_cd_eject_request_cb,
2463     .is_tray_open = ide_cd_is_tray_open,
2464     .is_medium_locked = ide_cd_is_medium_locked,
2465 };
2466 
2467 static const BlockDevOps ide_hd_block_ops = {
2468     .resize_cb = ide_resize_cb,
2469 };
2470 
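     /*
      * Attach a BlockBackend to an IDEState and initialise geometry, WWN,
      * SMART defaults and the identify strings (serial, model, firmware
      * version), falling back to QEMU defaults when none are supplied.
      * Returns 0 on success, -1 (with errp set) on error.
      *
      * A typical caller (illustrative sketch only, not code from this file)
      * does roughly:
      *
      *     ide_init2(bus, irq);
      *     ide_init_drive(&bus->ifs[0], blk, IDE_HD, NULL, serial, NULL, 0,
      *                    cyls, heads, secs, chs_trans, &err);
      */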
2471 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2472                    const char *version, const char *serial, const char *model,
2473                    uint64_t wwn,
2474                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2475                    int chs_trans, Error **errp)
2476 {
2477     uint64_t nb_sectors;
2478 
2479     s->blk = blk;
2480     s->drive_kind = kind;
2481 
2482     blk_get_geometry(blk, &nb_sectors);
2483     s->cylinders = cylinders;
2484     s->heads = heads;
2485     s->sectors = secs;
2486     s->chs_trans = chs_trans;
2487     s->nb_sectors = nb_sectors;
2488     s->wwn = wwn;
2489     /* The SMART values should be preserved across power cycles
2490        but they aren't.  */
2491     s->smart_enabled = 1;
2492     s->smart_autosave = 1;
2493     s->smart_errors = 0;
2494     s->smart_selftest_count = 0;
2495     if (kind == IDE_CD) {
2496         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2497         blk_set_guest_block_size(blk, 2048);
2498     } else {
2499         if (!blk_is_inserted(s->blk)) {
2500             error_setg(errp, "Device needs media, but drive is empty");
2501             return -1;
2502         }
2503         if (blk_is_read_only(blk)) {
2504             error_setg(errp, "Can't use a read-only drive");
2505             return -1;
2506         }
2507         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2508     }
2509     if (serial) {
2510         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2511     } else {
2512         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2513                  "QM%05d", s->drive_serial);
2514     }
2515     if (model) {
2516         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2517     } else {
2518         switch (kind) {
2519         case IDE_CD:
2520             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2521             break;
2522         case IDE_CFATA:
2523             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2524             break;
2525         default:
2526             strcpy(s->drive_model_str, "QEMU HARDDISK");
2527             break;
2528         }
2529     }
2530 
2531     if (version) {
2532         pstrcpy(s->version, sizeof(s->version), version);
2533     } else {
2534         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2535     }
2536 
2537     ide_reset(s);
2538     blk_iostatus_enable(blk);
2539     return 0;
2540 }
2541 
2542 static void ide_init1(IDEBus *bus, int unit)
2543 {
2544     static int drive_serial = 1;
2545     IDEState *s = &bus->ifs[unit];
2546 
2547     s->bus = bus;
2548     s->unit = unit;
2549     s->drive_serial = drive_serial++;
2550     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2551     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2552     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2553     memset(s->io_buffer, 0, s->io_buffer_total_len);
2554 
2555     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2556     memset(s->smart_selftest_data, 0, 512);
2557 
2558     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2559                                            ide_sector_write_timer_cb, s);
2560 }
2561 
2562 static int ide_nop_int(IDEDMA *dma, int x)
2563 {
2564     return 0;
2565 }
2566 
2567 static void ide_nop(IDEDMA *dma)
2568 {
2569 }
2570 
2571 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2572 {
2573     return 0;
2574 }
2575 
2576 static const IDEDMAOps ide_dma_nop_ops = {
2577     .prepare_buf    = ide_nop_int32,
2578     .restart_dma    = ide_nop,
2579     .rw_buf         = ide_nop_int,
2580 };
2581 
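     /*
      * Restart machinery: when the VM resumes (see ide_restart_cb below), a
      * bottom half inspects bus->error_status and re-issues the interrupted
      * DMA, PIO, flush or ATAPI request from the saved retry state.
      */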
2582 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2583 {
2584     s->unit = s->bus->retry_unit;
2585     ide_set_sector(s, s->bus->retry_sector_num);
2586     s->nsector = s->bus->retry_nsector;
2587     s->bus->dma->ops->restart_dma(s->bus->dma);
2588     s->io_buffer_size = 0;
2589     s->dma_cmd = dma_cmd;
2590     ide_start_dma(s, ide_dma_cb);
2591 }
2592 
2593 static void ide_restart_bh(void *opaque)
2594 {
2595     IDEBus *bus = opaque;
2596     IDEState *s;
2597     bool is_read;
2598     int error_status;
2599 
2600     qemu_bh_delete(bus->bh);
2601     bus->bh = NULL;
2602 
2603     error_status = bus->error_status;
2604     if (bus->error_status == 0) {
2605         return;
2606     }
2607 
2608     s = idebus_active_if(bus);
2609     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2610 
2611     /* The error status must be cleared before resubmitting the request: the
2612      * request may fail again, and the new failure can only be detected if the
2613      * called function is able to set a fresh error status. */
2614     bus->error_status = 0;
2615 
2616     /* The HBA has generically asked to be kicked on retry */
2617     if (error_status & IDE_RETRY_HBA) {
2618         if (s->bus->dma->ops->restart) {
2619             s->bus->dma->ops->restart(s->bus->dma);
2620         }
2621     } else if (IS_IDE_RETRY_DMA(error_status)) {
2622         if (error_status & IDE_RETRY_TRIM) {
2623             ide_restart_dma(s, IDE_DMA_TRIM);
2624         } else {
2625             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2626         }
2627     } else if (IS_IDE_RETRY_PIO(error_status)) {
2628         if (is_read) {
2629             ide_sector_read(s);
2630         } else {
2631             ide_sector_write(s);
2632         }
2633     } else if (error_status & IDE_RETRY_FLUSH) {
2634         ide_flush_cache(s);
2635     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2636         assert(s->end_transfer_func == ide_atapi_cmd);
2637         ide_atapi_dma_restart(s);
2638     } else {
2639         abort();
2640     }
2641 }
2642 
2643 static void ide_restart_cb(void *opaque, int running, RunState state)
2644 {
2645     IDEBus *bus = opaque;
2646 
2647     if (!running)
2648         return;
2649 
2650     if (!bus->bh) {
2651         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2652         qemu_bh_schedule(bus->bh);
2653     }
2654 }
2655 
2656 void ide_register_restart_cb(IDEBus *bus)
2657 {
2658     if (bus->dma->ops->restart_dma) {
2659         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2660     }
2661 }
2662 
2663 static IDEDMA ide_dma_nop = {
2664     .ops = &ide_dma_nop_ops,
2665     .aiocb = NULL,
2666 };
2667 
2668 void ide_init2(IDEBus *bus, qemu_irq irq)
2669 {
2670     int i;
2671 
2672     for (i = 0; i < 2; i++) {
2673         ide_init1(bus, i);
2674         ide_reset(&bus->ifs[i]);
2675     }
2676     bus->irq = irq;
2677     bus->dma = &ide_dma_nop;
2678 }
2679 
2680 void ide_exit(IDEState *s)
2681 {
2682     timer_del(s->sector_write_timer);
2683     timer_free(s->sector_write_timer);
2684     qemu_vfree(s->smart_selftest_data);
2685     qemu_vfree(s->io_buffer);
2686 }
2687 
2688 static const MemoryRegionPortio ide_portio_list[] = {
2689     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2690     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2691     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2692     PORTIO_END_OF_LIST(),
2693 };
2694 
2695 static const MemoryRegionPortio ide_portio2_list[] = {
2696     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2697     PORTIO_END_OF_LIST(),
2698 };
2699 
2700 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2701 {
2702     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2703        bridge has been set up properly to always register with ISA.  */
2704     isa_register_portio_list(dev, &bus->portio_list,
2705                              iobase, ide_portio_list, bus, "ide");
2706 
2707     if (iobase2) {
2708         isa_register_portio_list(dev, &bus->portio2_list,
2709                                  iobase2, ide_portio2_list, bus, "ide");
2710     }
2711 }
2712 
2713 static bool is_identify_set(void *opaque, int version_id)
2714 {
2715     IDEState *s = opaque;
2716 
2717     return s->identify_set != 0;
2718 }
2719 
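     /*
      * end_transfer_func cannot be migrated as a raw pointer, so the PIO
      * vmstate code saves it as an index into this table (pre_save) and
      * restores the pointer from that index on load (post_load).
      */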
2720 static EndTransferFunc *transfer_end_table[] = {
2721         ide_sector_read,
2722         ide_sector_write,
2723         ide_transfer_stop,
2724         ide_atapi_cmd_reply_end,
2725         ide_atapi_cmd,
2726         ide_dummy_transfer_stop,
2727 };
2728 
2729 static int transfer_end_table_idx(EndTransferFunc *fn)
2730 {
2731     int i;
2732 
2733     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2734         if (transfer_end_table[i] == fn)
2735             return i;
2736 
2737     return -1;
2738 }
2739 
2740 static int ide_drive_post_load(void *opaque, int version_id)
2741 {
2742     IDEState *s = opaque;
2743 
2744     if (s->blk && s->identify_set) {
2745         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2746     }
2747     return 0;
2748 }
2749 
2750 static int ide_drive_pio_post_load(void *opaque, int version_id)
2751 {
2752     IDEState *s = opaque;
2753 
2754     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2755         return -EINVAL;
2756     }
2757     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2758     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2759     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2760     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2761 
2762     return 0;
2763 }
2764 
2765 static int ide_drive_pio_pre_save(void *opaque)
2766 {
2767     IDEState *s = opaque;
2768     int idx;
2769 
2770     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2771     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2772 
2773     idx = transfer_end_table_idx(s->end_transfer_func);
2774     if (idx == -1) {
2775         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2776                         __func__);
2777         s->end_transfer_fn_idx = 2;
2778     } else {
2779         s->end_transfer_fn_idx = idx;
2780     }
2781 
2782     return 0;
2783 }
2784 
2785 static bool ide_drive_pio_state_needed(void *opaque)
2786 {
2787     IDEState *s = opaque;
2788 
2789     return ((s->status & DRQ_STAT) != 0)
2790         || (s->bus->error_status & IDE_RETRY_PIO);
2791 }
2792 
2793 static bool ide_tray_state_needed(void *opaque)
2794 {
2795     IDEState *s = opaque;
2796 
2797     return s->tray_open || s->tray_locked;
2798 }
2799 
2800 static bool ide_atapi_gesn_needed(void *opaque)
2801 {
2802     IDEState *s = opaque;
2803 
2804     return s->events.new_media || s->events.eject_request;
2805 }
2806 
2807 static bool ide_error_needed(void *opaque)
2808 {
2809     IDEBus *bus = opaque;
2810 
2811     return (bus->error_status != 0);
2812 }
2813 
2814 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2815 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2816     .name = "ide_drive/atapi/gesn_state",
2817     .version_id = 1,
2818     .minimum_version_id = 1,
2819     .needed = ide_atapi_gesn_needed,
2820     .fields = (VMStateField[]) {
2821         VMSTATE_BOOL(events.new_media, IDEState),
2822         VMSTATE_BOOL(events.eject_request, IDEState),
2823         VMSTATE_END_OF_LIST()
2824     }
2825 };
2826 
2827 static const VMStateDescription vmstate_ide_tray_state = {
2828     .name = "ide_drive/tray_state",
2829     .version_id = 1,
2830     .minimum_version_id = 1,
2831     .needed = ide_tray_state_needed,
2832     .fields = (VMStateField[]) {
2833         VMSTATE_BOOL(tray_open, IDEState),
2834         VMSTATE_BOOL(tray_locked, IDEState),
2835         VMSTATE_END_OF_LIST()
2836     }
2837 };
2838 
2839 static const VMStateDescription vmstate_ide_drive_pio_state = {
2840     .name = "ide_drive/pio_state",
2841     .version_id = 1,
2842     .minimum_version_id = 1,
2843     .pre_save = ide_drive_pio_pre_save,
2844     .post_load = ide_drive_pio_post_load,
2845     .needed = ide_drive_pio_state_needed,
2846     .fields = (VMStateField[]) {
2847         VMSTATE_INT32(req_nb_sectors, IDEState),
2848         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2849                              vmstate_info_uint8, uint8_t),
2850         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2851         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2852         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2853         VMSTATE_INT32(elementary_transfer_size, IDEState),
2854         VMSTATE_INT32(packet_transfer_size, IDEState),
2855         VMSTATE_END_OF_LIST()
2856     }
2857 };
2858 
2859 const VMStateDescription vmstate_ide_drive = {
2860     .name = "ide_drive",
2861     .version_id = 3,
2862     .minimum_version_id = 0,
2863     .post_load = ide_drive_post_load,
2864     .fields = (VMStateField[]) {
2865         VMSTATE_INT32(mult_sectors, IDEState),
2866         VMSTATE_INT32(identify_set, IDEState),
2867         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2868         VMSTATE_UINT8(feature, IDEState),
2869         VMSTATE_UINT8(error, IDEState),
2870         VMSTATE_UINT32(nsector, IDEState),
2871         VMSTATE_UINT8(sector, IDEState),
2872         VMSTATE_UINT8(lcyl, IDEState),
2873         VMSTATE_UINT8(hcyl, IDEState),
2874         VMSTATE_UINT8(hob_feature, IDEState),
2875         VMSTATE_UINT8(hob_sector, IDEState),
2876         VMSTATE_UINT8(hob_nsector, IDEState),
2877         VMSTATE_UINT8(hob_lcyl, IDEState),
2878         VMSTATE_UINT8(hob_hcyl, IDEState),
2879         VMSTATE_UINT8(select, IDEState),
2880         VMSTATE_UINT8(status, IDEState),
2881         VMSTATE_UINT8(lba48, IDEState),
2882         VMSTATE_UINT8(sense_key, IDEState),
2883         VMSTATE_UINT8(asc, IDEState),
2884         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2885         VMSTATE_END_OF_LIST()
2886     },
2887     .subsections = (const VMStateDescription*[]) {
2888         &vmstate_ide_drive_pio_state,
2889         &vmstate_ide_tray_state,
2890         &vmstate_ide_atapi_gesn_state,
2891         NULL
2892     }
2893 };
2894 
2895 static const VMStateDescription vmstate_ide_error_status = {
2896     .name = "ide_bus/error",
2897     .version_id = 2,
2898     .minimum_version_id = 1,
2899     .needed = ide_error_needed,
2900     .fields = (VMStateField[]) {
2901         VMSTATE_INT32(error_status, IDEBus),
2902         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2903         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2904         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2905         VMSTATE_END_OF_LIST()
2906     }
2907 };
2908 
2909 const VMStateDescription vmstate_ide_bus = {
2910     .name = "ide_bus",
2911     .version_id = 1,
2912     .minimum_version_id = 1,
2913     .fields = (VMStateField[]) {
2914         VMSTATE_UINT8(cmd, IDEBus),
2915         VMSTATE_UINT8(unit, IDEBus),
2916         VMSTATE_END_OF_LIST()
2917     },
2918     .subsections = (const VMStateDescription*[]) {
2919         &vmstate_ide_error_status,
2920         NULL
2921     }
2922 };
2923 
2924 void ide_drive_get(DriveInfo **hd, int n)
2925 {
2926     int i;
2927 
2928     for (i = 0; i < n; i++) {
2929         hd[i] = drive_get_by_index(IF_IDE, i);
2930     }
2931 }
2932